Merge "Assembly TLAB allocation fast path for arm64."
diff --git a/Android.mk b/Android.mk
index e762814..e89f617 100644
--- a/Android.mk
+++ b/Android.mk
@@ -270,11 +270,19 @@
test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist)
ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests
-# Valgrind. Currently only 32b gtests.
+# Valgrind. Currently only 32b gtests. TODO: change this from 32-bit only to both 32-bit and 64-bit.
.PHONY: valgrind-test-art-host
valgrind-test-art-host: valgrind-test-art-host-gtest32
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+.PHONY: valgrind-test-art-host32
+valgrind-test-art-host32: valgrind-test-art-host-gtest32
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
+.PHONY: valgrind-test-art-host64
+valgrind-test-art-host64: valgrind-test-art-host-gtest64
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
########################################################################
# target test rules
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 2d4fff4..4712d47 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -96,12 +96,21 @@
std::vector<VariableLocation> variable_locations;
// Get stack maps sorted by pc (they might not be sorted internally).
+ // TODO(dsrbecky) Remove this once stackmaps get sorted by pc.
const CodeInfo code_info(method_info->code_info);
const StackMapEncoding encoding = code_info.ExtractEncoding();
std::map<uint32_t, uint32_t> stack_maps; // low_pc -> stack_map_index.
for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
StackMap stack_map = code_info.GetStackMapAt(s, encoding);
DCHECK(stack_map.IsValid());
+ if (!stack_map.HasDexRegisterMap(encoding)) {
+ // The compiler creates stackmaps without register maps at the start of
+ // basic blocks in order to keep instruction-accurate line number mapping.
+ // However, we never stop at those (breakpoint locations always have a map).
+ // Therefore, for the purpose of local variables, we ignore them.
+ // The main reason for this is to save space by avoiding undefined gaps.
+ continue;
+ }
const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding);
DCHECK_LE(pc_offset, method_info->code_size);
DCHECK_LE(compilation_unit_code_address, method_info->code_address);
@@ -128,6 +137,8 @@
// Check that the stack map is in the requested range.
uint32_t dex_pc = stack_map.GetDexPc(encoding);
if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
+ // The variable is not in scope at this PC, so omit the entry.
+ // Note that this differs from a None() entry, which means in scope but at an unknown location.
continue;
}
@@ -136,13 +147,12 @@
DexRegisterLocation reg_hi = DexRegisterLocation::None();
DCHECK_LT(stack_map_index, dex_register_maps.size());
DexRegisterMap dex_register_map = dex_register_maps[stack_map_index];
- if (dex_register_map.IsValid()) {
- reg_lo = dex_register_map.GetDexRegisterLocation(
- vreg, method_info->code_item->registers_size_, code_info, encoding);
- if (is64bitValue) {
- reg_hi = dex_register_map.GetDexRegisterLocation(
- vreg + 1, method_info->code_item->registers_size_, code_info, encoding);
- }
+ DCHECK(dex_register_map.IsValid());
+ reg_lo = dex_register_map.GetDexRegisterLocation(
+ vreg, method_info->code_item->registers_size_, code_info, encoding);
+ if (is64bitValue) {
+ reg_hi = dex_register_map.GetDexRegisterLocation(
+ vreg + 1, method_info->code_item->registers_size_, code_info, encoding);
}
// Add location entry for this address range.
@@ -152,9 +162,6 @@
variable_locations.back().high_pc == low_pc) {
// Merge with the previous entry (extend its range).
variable_locations.back().high_pc = high_pc;
- } else if (!variable_locations.empty() && reg_lo == DexRegisterLocation::None()) {
- // Unknown location - use the last known location as best-effort guess.
- variable_locations.back().high_pc = high_pc;
} else {
variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi});
}
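
Illustration (not part of the patch): the hunk above builds the sorted pc -> stack_map_index table while now skipping stack maps that carry no dex register map. A minimal standalone sketch of that filtering; StackMapLite and BuildPcIndex are invented here purely for illustration and are not ART types.

#include <cstdint>
#include <map>
#include <vector>

struct StackMapLite {          // hypothetical stand-in for art::StackMap
  uint32_t native_pc_offset;
  bool has_dex_register_map;
};

std::map<uint32_t, uint32_t> BuildPcIndex(const std::vector<StackMapLite>& maps) {
  std::map<uint32_t, uint32_t> pc_to_index;  // sorted by pc, like the real code
  for (uint32_t i = 0; i < maps.size(); ++i) {
    if (!maps[i].has_dex_register_map) {
      continue;  // block-boundary maps carry no locations; ignore them here
    }
    pc_to_index.emplace(maps[i].native_pc_offset, i);
  }
  return pc_to_index;
}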
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index ef44a6f..26ab281 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -841,7 +841,7 @@
load.p_type = PT_LOAD;
load.p_flags = PF_R;
load.p_offset = load.p_vaddr = load.p_paddr = 0;
- load.p_filesz = load.p_memsz = sections_[0]->header_.sh_offset;
+ load.p_filesz = load.p_memsz = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * kMaxProgramHeaders;
load.p_align = kPageSize;
phdrs.push_back(load);
}
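
Illustration (not part of the patch): the replaced line pins the first PT_LOAD segment to exactly the ELF header plus the program header table, rather than whatever offset the first section happens to get. A standalone sketch of that size computation; kMaxProgramHeaders is an assumed placeholder value here, the real constant belongs to ElfBuilder.

#include <elf.h>
#include <cstddef>
#include <cstdio>

int main() {
  constexpr size_t kMaxProgramHeaders = 16;  // assumed value for illustration
  size_t covered = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * kMaxProgramHeaders;
  std::printf("first PT_LOAD covers %zu bytes\n", covered);
  return 0;
}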
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
index 4219d97..ca0869a 100644
--- a/compiler/elf_writer.cc
+++ b/compiler/elf_writer.cc
@@ -42,7 +42,11 @@
size_t* oat_loaded_size,
size_t* oat_data_offset) {
std::string error_msg;
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, false, false, &error_msg));
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file,
+ false,
+ false,
+ /*low_4gb*/false,
+ &error_msg));
CHECK(elf_file.get() != nullptr) << error_msg;
bool success = elf_file->GetLoadedSize(oat_loaded_size, &error_msg);
@@ -54,7 +58,7 @@
bool ElfWriter::Fixup(File* file, uintptr_t oat_data_begin) {
std::string error_msg;
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, &error_msg));
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, &error_msg));
CHECK(elf_file.get() != nullptr) << error_msg;
// Lookup "oatdata" symbol address.
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 7cf774e..449f514 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -64,7 +64,11 @@
ASSERT_TRUE(file.get() != nullptr);
{
std::string error_msg;
- std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
+ std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
+ false,
+ false,
+ /*low_4gb*/false,
+ &error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false);
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", false);
@@ -72,7 +76,11 @@
}
{
std::string error_msg;
- std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
+ std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
+ false,
+ false,
+ /*low_4gb*/false,
+ &error_msg));
CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", true);
@@ -80,9 +88,13 @@
}
{
std::string error_msg;
- std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, true, &error_msg));
+ std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
+ false,
+ true,
+ /*low_4gb*/false,
+ &error_msg));
CHECK(ef.get() != nullptr) << error_msg;
- CHECK(ef->Load(false, &error_msg)) << error_msg;
+ CHECK(ef->Load(false, /*low_4gb*/false, &error_msg)) << error_msg;
EXPECT_EQ(dl_oatdata, ef->FindDynamicSymbolAddress("oatdata"));
EXPECT_EQ(dl_oatexec, ef->FindDynamicSymbolAddress("oatexec"));
EXPECT_EQ(dl_oatlastword, ef->FindDynamicSymbolAddress("oatlastword"));
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index b1b971f..0b69810 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1542,15 +1542,16 @@
}
case kNativeObjectRelocationTypeArtMethodArrayClean:
case kNativeObjectRelocationTypeArtMethodArrayDirty: {
- memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(
- 0,
- ArtMethod::Size(target_ptr_size_),
- ArtMethod::Alignment(target_ptr_size_)));
+ size_t size = ArtMethod::Size(target_ptr_size_);
+ size_t alignment = ArtMethod::Alignment(target_ptr_size_);
+ memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0, size, alignment));
+ // Clear padding to avoid non-deterministic data in the image (and placate valgrind).
+ reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(dest)->ClearPadding(size, alignment);
break;
+ }
case kNativeObjectRelocationTypeDexCacheArray:
// Nothing to copy here, everything is done in FixupDexCache().
break;
- }
}
}
// Fixup the image method roots.
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index d22044a..4b48107 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -230,7 +230,7 @@
return elf_writer->End();
}
- void TestDexFileInput(bool verify);
+ void TestDexFileInput(bool verify, bool low_4gb);
void TestZipFileInput(bool verify);
std::unique_ptr<const InstructionSetFeatures> insn_features_;
@@ -374,8 +374,14 @@
if (kCompile) { // OatWriter strips the code, regenerate to compare
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
- std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), nullptr,
- nullptr, false, nullptr, &error_msg));
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(),
+ tmp.GetFilename(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/true,
+ nullptr,
+ &error_msg));
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
@@ -504,6 +510,7 @@
nullptr,
nullptr,
false,
+ /*low_4gb*/false,
nullptr,
&error_msg));
ASSERT_TRUE(oat_file != nullptr);
@@ -518,7 +525,7 @@
}
}
-void OatTest::TestDexFileInput(bool verify) {
+void OatTest::TestDexFileInput(bool verify, bool low_4gb) {
TimingLogger timings("OatTest::DexFileInput", false, false);
std::vector<const char*> input_filenames;
@@ -572,8 +579,13 @@
nullptr,
nullptr,
false,
+ low_4gb,
nullptr,
&error_msg));
+ if (low_4gb) {
+ uintptr_t begin = reinterpret_cast<uintptr_t>(opened_oat_file->Begin());
+ EXPECT_EQ(begin, static_cast<uint32_t>(begin));
+ }
ASSERT_TRUE(opened_oat_file != nullptr);
ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
std::unique_ptr<const DexFile> opened_dex_file1 =
@@ -595,11 +607,15 @@
}
TEST_F(OatTest, DexFileInputCheckOutput) {
- TestDexFileInput(false);
+ TestDexFileInput(false, /*low_4gb*/false);
+}
+
+TEST_F(OatTest, DexFileInputCheckOutputLow4GB) {
+ TestDexFileInput(false, /*low_4gb*/true);
}
TEST_F(OatTest, DexFileInputCheckVerifier) {
- TestDexFileInput(true);
+ TestDexFileInput(true, /*low_4gb*/false);
}
void OatTest::TestZipFileInput(bool verify) {
@@ -667,6 +683,7 @@
nullptr,
nullptr,
false,
+ /*low_4gb*/false,
nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr);
@@ -714,6 +731,7 @@
nullptr,
nullptr,
false,
+ /*low_4gb*/false,
nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr);
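
Illustration (not part of the patch): the new low_4gb test path asserts that the mapped oat file begins below 4GB by round-tripping the address through uint32_t. The same idiom as a standalone helper; FitsInLow4GB is a name made up for this sketch.

#include <cstdint>

bool FitsInLow4GB(const void* p) {
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  // A value fits in 32 bits iff truncating and widening it back is lossless.
  return v == static_cast<uintptr_t>(static_cast<uint32_t>(v));
}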
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index f2929bc..084360f 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -535,6 +535,7 @@
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
dynamic_bce_standby_(
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ record_dynamic_bce_standby_(true),
early_exit_loop_(
std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
@@ -556,6 +557,7 @@
void Finish() {
// Retry dynamic bce candidates on standby that are still in the graph.
+ record_dynamic_bce_standby_ = false;
for (HBoundsCheck* bounds_check : dynamic_bce_standby_) {
if (bounds_check->IsInBlock()) {
TryDynamicBCE(bounds_check);
@@ -1191,7 +1193,7 @@
if (!array_length->IsArrayLength()) {
continue; // disregard phis and constants
}
- // Collect all bounds checks are still there and that are related as "a[base + constant]"
+ // Collect all bounds checks that are still there and that are related as "a[base + constant]"
// for a base instruction (possibly absent) and various constants. Note that no attempt
// is made to partition the set into matching subsets (viz. a[0], a[1] and a[base+1] and
// a[base+2] are considered as one set).
@@ -1214,7 +1216,12 @@
HInstruction* other_array_length = other_bounds_check->InputAt(1);
ValueBound other_value = ValueBound::AsValueBound(other_index);
if (array_length == other_array_length && base == other_value.GetInstruction()) {
- int32_t other_c = other_value.GetConstant();
+ // Reject a certain (guaranteed) out-of-bounds access if BoundsCheck(l, l) occurs in the considered subset.
+ if (array_length == other_index) {
+ candidates.clear();
+ standby.clear();
+ break;
+ }
// Since a subsequent dominated block could be under a conditional, only accept
// the other bounds check if it is in same block or both blocks dominate the exit.
// TODO: we could improve this by testing proper post-dominance, or even if this
@@ -1222,6 +1229,7 @@
HBasicBlock* exit = GetGraph()->GetExitBlock();
if (block == user->GetBlock() ||
(block->Dominates(exit) && other_block->Dominates(exit))) {
+ int32_t other_c = other_value.GetConstant();
min_c = std::min(min_c, other_c);
max_c = std::max(max_c, other_c);
candidates.push_back(other_bounds_check);
@@ -1251,7 +1259,11 @@
distance <= kMaxLengthForAddingDeoptimize) { // reject likely/certain deopt
AddCompareWithDeoptimization(block, array_length, base, min_c, max_c);
for (HInstruction* other_bounds_check : candidates) {
- ReplaceInstruction(other_bounds_check, other_bounds_check->InputAt(0));
+ // Only replace if still in the graph. This avoids visiting the same
+ // bounds check twice if it occurred multiple times in the use list.
+ if (other_bounds_check->IsInBlock()) {
+ ReplaceInstruction(other_bounds_check, other_bounds_check->InputAt(0));
+ }
}
}
}
@@ -1467,7 +1479,9 @@
}
// If bounds check made it this far, it is worthwhile to check later if
// the loop was forced finite by another candidate.
- dynamic_bce_standby_.push_back(check);
+ if (record_dynamic_bce_standby_) {
+ dynamic_bce_standby_.push_back(check);
+ }
return false;
}
return true;
@@ -1691,6 +1705,7 @@
// Stand by list for dynamic bce.
ArenaVector<HBoundsCheck*> dynamic_bce_standby_;
+ bool record_dynamic_bce_standby_;
// Early-exit loop bookkeeping.
ArenaSafeMap<uint32_t, bool> early_exit_loop_;
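
Illustration (not part of the patch): the BCE changes group bounds checks of the form a[base + c] per array length and now bail out when one of the grouped indices is the array length itself (BoundsCheck(l, l)), a guaranteed out-of-bounds access that must not be removed. A simplified standalone model of that grouping; CheckLite and GroupChecks are invented here and are not ART's HIR types.

#include <algorithm>
#include <vector>

struct CheckLite { int constant; bool index_is_array_length; };

bool GroupChecks(const std::vector<CheckLite>& checks, int* min_c, int* max_c) {
  *min_c = 0;
  *max_c = 0;
  for (const CheckLite& c : checks) {
    if (c.index_is_array_length) {
      return false;  // BoundsCheck(l, l): certain OOB, drop the whole candidate set
    }
    *min_c = std::min(*min_c, c.constant);
    *max_c = std::max(*max_c, c.constant);
  }
  return true;
}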
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 049901b..9cfc16f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -235,7 +235,10 @@
HInstruction* input_other = instruction->GetLeastConstantLeft();
if (input_cst != nullptr) {
- if (input_cst->IsZero()) {
+ int64_t cst = Int64FromConstant(input_cst);
+ int64_t mask =
+ (input_other->GetType() == Primitive::kPrimLong) ? kMaxLongShiftValue : kMaxIntShiftValue;
+ if ((cst & mask) == 0) {
// Replace code looking like
// SHL dst, src, 0
// with
@@ -469,7 +472,9 @@
return;
}
- bool outcome;
+ // Note: The `outcome` is initialized to please valgrind - the compiler can reorder
+ // the return value check with the `outcome` check, b/27651442.
+ bool outcome = false;
if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) {
if (outcome) {
check_cast->GetBlock()->RemoveInstruction(check_cast);
@@ -511,7 +516,9 @@
return;
}
- bool outcome;
+ // Note: The `outcome` is initialized to please valgrind - the compiler can reorder
+ // the return value check with the `outcome` check, b/27651442.
+ bool outcome = false;
if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) {
if (outcome && can_be_null) {
// Type test will succeed, we just need a null test.
@@ -867,9 +874,7 @@
return;
}
}
- } else if (input->IsAnd() &&
- Primitive::IsIntegralType(result_type) &&
- input->HasOnlyOneNonEnvironmentUse()) {
+ } else if (input->IsAnd() && Primitive::IsIntegralType(result_type)) {
DCHECK(Primitive::IsIntegralType(input_type));
HAnd* input_and = input->AsAnd();
HConstant* constant = input_and->GetConstantRight();
@@ -879,10 +884,18 @@
size_t trailing_ones = CTZ(~static_cast<uint64_t>(value));
if (trailing_ones >= kBitsPerByte * Primitive::ComponentSize(result_type)) {
// The `HAnd` is useless, for example in `(byte) (x & 0xff)`, get rid of it.
- input_and->ReplaceWith(input_and->GetLeastConstantLeft());
- input_and->GetBlock()->RemoveInstruction(input_and);
- RecordSimplification();
- return;
+ HInstruction* original_input = input_and->GetLeastConstantLeft();
+ if (IsTypeConversionImplicit(original_input->GetType(), result_type)) {
+ instruction->ReplaceWith(original_input);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ RecordSimplification();
+ return;
+ } else if (input->HasOnlyOneNonEnvironmentUse()) {
+ input_and->ReplaceWith(original_input);
+ input_and->GetBlock()->RemoveInstruction(input_and);
+ RecordSimplification();
+ return;
+ }
}
}
}
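
Illustration (not part of the patch): the shift simplification now treats any constant whose masked value is zero as a no-op, because dex shift semantics mask the distance to the low 5 bits for int and 6 bits for long, so e.g. SHL by 32 on an int leaves the value unchanged. A standalone sketch of the check; the mask values 31 and 63 mirror kMaxIntShiftValue and kMaxLongShiftValue.

#include <cstdint>

bool ShiftIsNoOp(int64_t cst, bool is_long) {
  const int64_t mask = is_long ? 63 : 31;  // assumed values of the ART constants
  return (cst & mask) == 0;  // e.g. SHL int by 32, or SHL long by 64
}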
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 3ed5766..9ac18e8 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -103,9 +103,11 @@
template <typename ElfTypes>
class OatSymbolizer FINAL {
public:
- OatSymbolizer(const OatFile* oat_file, const std::string& output_name) :
- oat_file_(oat_file), builder_(nullptr),
- output_name_(output_name.empty() ? "symbolized.oat" : output_name) {
+ OatSymbolizer(const OatFile* oat_file, const std::string& output_name, bool no_bits) :
+ oat_file_(oat_file),
+ builder_(nullptr),
+ output_name_(output_name.empty() ? "symbolized.oat" : output_name),
+ no_bits_(no_bits) {
}
bool Symbolize() {
@@ -124,17 +126,25 @@
auto* text = builder_->GetText();
auto* bss = builder_->GetBss();
- rodata->Start();
const uint8_t* rodata_begin = oat_file_->Begin();
const size_t rodata_size = oat_file_->GetOatHeader().GetExecutableOffset();
- rodata->WriteFully(rodata_begin, rodata_size);
- rodata->End();
+ if (no_bits_) {
+ rodata->WriteNoBitsSection(rodata_size);
+ } else {
+ rodata->Start();
+ rodata->WriteFully(rodata_begin, rodata_size);
+ rodata->End();
+ }
- text->Start();
const uint8_t* text_begin = oat_file_->Begin() + rodata_size;
const size_t text_size = oat_file_->End() - text_begin;
- text->WriteFully(text_begin, text_size);
- text->End();
+ if (no_bits_) {
+ text->WriteNoBitsSection(text_size);
+ } else {
+ text->Start();
+ text->WriteFully(text_begin, text_size);
+ text->End();
+ }
if (oat_file_->BssSize() != 0) {
bss->WriteNoBitsSection(oat_file_->BssSize());
@@ -264,6 +274,7 @@
std::vector<debug::MethodDebugInfo> method_debug_infos_;
std::unordered_set<uint32_t> seen_offsets_;
const std::string output_name_;
+ bool no_bits_;
};
class OatDumperOptions {
@@ -280,6 +291,8 @@
bool list_methods,
bool dump_header_only,
const char* export_dex_location,
+ const char* app_image,
+ const char* app_oat,
uint32_t addr2instr)
: dump_raw_mapping_table_(dump_raw_mapping_table),
dump_raw_gc_map_(dump_raw_gc_map),
@@ -293,6 +306,8 @@
list_methods_(list_methods),
dump_header_only_(dump_header_only),
export_dex_location_(export_dex_location),
+ app_image_(app_image),
+ app_oat_(app_oat),
addr2instr_(addr2instr),
class_loader_(nullptr) {}
@@ -308,6 +323,8 @@
const bool list_methods_;
const bool dump_header_only_;
const char* const export_dex_location_;
+ const char* const app_image_;
+ const char* const app_oat_;
uint32_t addr2instr_;
Handle<mirror::ClassLoader>* class_loader_;
};
@@ -1433,8 +1450,10 @@
class ImageDumper {
public:
- ImageDumper(std::ostream* os, gc::space::ImageSpace& image_space,
- const ImageHeader& image_header, OatDumperOptions* oat_dumper_options)
+ ImageDumper(std::ostream* os,
+ gc::space::ImageSpace& image_space,
+ const ImageHeader& image_header,
+ OatDumperOptions* oat_dumper_options)
: os_(os),
vios_(os),
indent1_(&vios_),
@@ -1532,16 +1551,23 @@
os << "OAT LOCATION: " << oat_location;
os << "\n";
std::string error_msg;
- const OatFile* oat_file = runtime->GetOatFileManager().FindOpenedOatFileFromOatLocation(
- oat_location);
+ const OatFile* oat_file = image_space_.GetOatFile();
if (oat_file == nullptr) {
- oat_file = OatFile::Open(oat_location, oat_location,
- nullptr, nullptr, false, nullptr,
+ oat_file = runtime->GetOatFileManager().FindOpenedOatFileFromOatLocation(oat_location);
+ }
+ if (oat_file == nullptr) {
+ oat_file = OatFile::Open(oat_location,
+ oat_location,
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ nullptr,
&error_msg);
- if (oat_file == nullptr) {
- os << "NOT FOUND: " << error_msg << "\n";
- return false;
- }
+ }
+ if (oat_file == nullptr) {
+ os << "OAT FILE NOT FOUND: " << error_msg << "\n";
+ return EXIT_FAILURE;
}
os << "\n";
@@ -1592,21 +1618,27 @@
// TODO: Dump fields.
// Dump methods after.
const auto& methods_section = image_header_.GetMethodsSection();
- const size_t pointer_size =
- InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet());
DumpArtMethodVisitor visitor(this);
- methods_section.VisitPackedArtMethods(&visitor, image_space_.Begin(), pointer_size);
+ methods_section.VisitPackedArtMethods(&visitor,
+ image_space_.Begin(),
+ image_header_.GetPointerSize());
// Dump the large objects separately.
heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
indent_os << "\n";
}
os << "STATS:\n" << std::flush;
std::unique_ptr<File> file(OS::OpenFileForReading(image_filename.c_str()));
- if (file.get() == nullptr) {
+ size_t data_size = image_header_.GetDataSize(); // stored size in file.
+ if (file == nullptr) {
LOG(WARNING) << "Failed to find image in " << image_filename;
- }
- if (file.get() != nullptr) {
+ } else {
stats_.file_bytes = file->GetLength();
+ // If the image is compressed, adjust to decompressed size.
+ size_t uncompressed_size = image_header_.GetImageSize() - sizeof(ImageHeader);
+ if (image_header_.GetStorageMode() == ImageHeader::kStorageModeUncompressed) {
+ DCHECK_EQ(uncompressed_size, data_size) << "Sizes should match for uncompressed image";
+ }
+ stats_.file_bytes += uncompressed_size - data_size;
}
size_t header_bytes = sizeof(ImageHeader);
const auto& object_section = image_header_.GetImageSection(ImageHeader::kSectionObjects);
@@ -1653,10 +1685,10 @@
uint32_t end_intern = intern_section.Offset() + intern_section.Size();
stats_.alignment_bytes += class_table_section.Offset() - end_intern;
- // Add space between class table and bitmap. Expect the bitmap to be page-aligned.
- uint32_t end_ctable = class_table_section.Offset() + class_table_section.Size();
+ // Add space between end of image data and bitmap. Expect the bitmap to be page-aligned.
+ const size_t bitmap_offset = sizeof(ImageHeader) + data_size;
CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
- stats_.alignment_bytes += bitmap_section.Offset() - end_ctable;
+ stats_.alignment_bytes += RoundUp(bitmap_offset, kPageSize) - bitmap_offset;
stats_.bitmap_bytes += bitmap_section.Size();
stats_.art_field_bytes += field_section.Size();
@@ -1680,7 +1712,7 @@
virtual void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
std::ostream& indent_os = image_dumper_->vios_.Stream();
indent_os << method << " " << " ArtMethod: " << PrettyMethod(method) << "\n";
- image_dumper_->DumpMethod(method, image_dumper_, indent_os);
+ image_dumper_->DumpMethod(method, indent_os);
indent_os << "\n";
}
@@ -1773,10 +1805,9 @@
return image_space_.Contains(object);
}
- const void* GetQuickOatCodeBegin(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ const void* GetQuickOatCodeBegin(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
- InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet()));
+ image_header_.GetPointerSize());
if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
quick_code = oat_dumper_->GetQuickOatCode(m);
}
@@ -1835,8 +1866,7 @@
}
ScopedIndentation indent1(&state->vios_);
DumpFields(os, obj, obj_class);
- const auto image_pointer_size =
- InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
+ const size_t image_pointer_size = state->image_header_.GetPointerSize();
if (obj->IsObjectArray()) {
auto* obj_array = obj->AsObjectArray<mirror::Object>();
for (int32_t i = 0, length = obj_array->GetLength(); i < length; i++) {
@@ -1881,7 +1911,9 @@
ScopedIndentation indent2(&state->vios_);
auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0, length = dex_cache->NumResolvedMethods(); i < length; ++i) {
- auto* elem = mirror::DexCache::GetElementPtrSize(resolved_methods, i, image_pointer_size);
+ auto* elem = mirror::DexCache::GetElementPtrSize(resolved_methods,
+ i,
+ image_pointer_size);
size_t run = 0;
for (size_t j = i + 1;
j != length && elem == mirror::DexCache::GetElementPtrSize(resolved_methods,
@@ -1943,13 +1975,11 @@
state->stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
}
- void DumpMethod(ArtMethod* method, ImageDumper* state, std::ostream& indent_os)
+ void DumpMethod(ArtMethod* method, std::ostream& indent_os)
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(method != nullptr);
- const auto image_pointer_size =
- InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
- const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
- const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
+ const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
+ const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
@@ -1958,13 +1988,13 @@
DCHECK(method_header->GetMappingTable() == nullptr) << PrettyMethod(method);
}
bool first_occurrence;
- uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
- state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
+ uint32_t quick_oat_code_size = GetQuickOatCodeSize(method);
+ ComputeOatSize(quick_oat_code_begin, &first_occurrence);
if (first_occurrence) {
- state->stats_.native_to_managed_code_bytes += quick_oat_code_size;
+ stats_.native_to_managed_code_bytes += quick_oat_code_size;
}
- if (quick_oat_code_begin !=
- method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) {
+ if (quick_oat_code_begin != method->GetEntryPointFromQuickCompiledCodePtrSize(
+ image_header_.GetPointerSize())) {
indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin);
}
} else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
@@ -1973,46 +2003,44 @@
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
- state->stats_.dex_instruction_bytes += dex_instruction_bytes;
+ stats_.dex_instruction_bytes += dex_instruction_bytes;
bool first_occurrence;
- size_t gc_map_bytes = state->ComputeOatSize(
- method_header->GetNativeGcMap(), &first_occurrence);
+ size_t gc_map_bytes = ComputeOatSize(method_header->GetNativeGcMap(), &first_occurrence);
if (first_occurrence) {
- state->stats_.gc_map_bytes += gc_map_bytes;
+ stats_.gc_map_bytes += gc_map_bytes;
}
- size_t pc_mapping_table_bytes = state->ComputeOatSize(
+ size_t pc_mapping_table_bytes = ComputeOatSize(
method_header->GetMappingTable(), &first_occurrence);
if (first_occurrence) {
- state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
+ stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes = 0u;
if (!method_header->IsOptimized()) {
// Methods compiled with the optimizing compiler have no vmap table.
- vmap_table_bytes = state->ComputeOatSize(
- method_header->GetVmapTable(), &first_occurrence);
+ vmap_table_bytes = ComputeOatSize(method_header->GetVmapTable(), &first_occurrence);
if (first_occurrence) {
- state->stats_.vmap_table_bytes += vmap_table_bytes;
+ stats_.vmap_table_bytes += vmap_table_bytes;
}
}
- uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
- state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
+ uint32_t quick_oat_code_size = GetQuickOatCodeSize(method);
+ ComputeOatSize(quick_oat_code_begin, &first_occurrence);
if (first_occurrence) {
- state->stats_.managed_code_bytes += quick_oat_code_size;
+ stats_.managed_code_bytes += quick_oat_code_size;
if (method->IsConstructor()) {
if (method->IsStatic()) {
- state->stats_.class_initializer_code_bytes += quick_oat_code_size;
+ stats_.class_initializer_code_bytes += quick_oat_code_size;
} else if (dex_instruction_bytes > kLargeConstructorDexBytes) {
- state->stats_.large_initializer_code_bytes += quick_oat_code_size;
+ stats_.large_initializer_code_bytes += quick_oat_code_size;
}
} else if (dex_instruction_bytes > kLargeMethodDexBytes) {
- state->stats_.large_method_code_bytes += quick_oat_code_size;
+ stats_.large_method_code_bytes += quick_oat_code_size;
}
}
- state->stats_.managed_code_bytes_ignoring_deduplication += quick_oat_code_size;
+ stats_.managed_code_bytes_ignoring_deduplication += quick_oat_code_size;
uint32_t method_access_flags = method->GetAccessFlags();
@@ -2022,11 +2050,11 @@
method_access_flags);
size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes +
- vmap_table_bytes + quick_oat_code_size + ArtMethod::Size(image_pointer_size);
+ vmap_table_bytes + quick_oat_code_size + ArtMethod::Size(image_header_.GetPointerSize());
double expansion =
static_cast<double>(quick_oat_code_size) / static_cast<double>(dex_instruction_bytes);
- state->stats_.ComputeOutliers(total_size, expansion, method);
+ stats_.ComputeOutliers(total_size, expansion, method);
}
}
@@ -2361,26 +2389,75 @@
DISALLOW_COPY_AND_ASSIGN(ImageDumper);
};
-static int DumpImage(Runtime* runtime, OatDumperOptions* options, std::ostream* os) {
+static int DumpImage(gc::space::ImageSpace* image_space,
+ OatDumperOptions* options,
+ std::ostream* os) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ if (!image_header.IsValid()) {
+ fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
+ return EXIT_FAILURE;
+ }
+ ImageDumper image_dumper(os, *image_space, image_header, options);
+ if (!image_dumper.Dump()) {
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
+}
+
+static int DumpImages(Runtime* runtime, OatDumperOptions* options, std::ostream* os) {
// Dumping the image, no explicit class loader.
ScopedNullHandle<mirror::ClassLoader> null_class_loader;
options->class_loader_ = &null_class_loader;
ScopedObjectAccess soa(Thread::Current());
- gc::Heap* heap = runtime->GetHeap();
- std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
- CHECK(!image_spaces.empty());
- for (gc::space::ImageSpace* image_space : image_spaces) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- if (!image_header.IsValid()) {
- fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
+ if (options->app_image_ != nullptr) {
+ if (options->app_oat_ == nullptr) {
+ LOG(ERROR) << "Can not dump app image without app oat file";
return EXIT_FAILURE;
}
-
- ImageDumper image_dumper(os, *image_space, image_header, options);
- if (!image_dumper.Dump()) {
+ // We can't know yet whether the app image is 32-bit, but it contains pointers into the oat
+ // file. We need to map the oat file in the low 4GB or else the fixup won't be able to fit
+ // oat file pointers into 32-bit pointer-sized ArtMethods.
+ std::string error_msg;
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(options->app_oat_,
+ options->app_oat_,
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/true,
+ nullptr,
+ &error_msg));
+ if (oat_file == nullptr) {
+ LOG(ERROR) << "Failed to open oat file " << options->app_oat_ << " with error " << error_msg;
return EXIT_FAILURE;
}
+ std::unique_ptr<gc::space::ImageSpace> space(
+ gc::space::ImageSpace::CreateFromAppImage(options->app_image_, oat_file.get(), &error_msg));
+ if (space == nullptr) {
+ LOG(ERROR) << "Failed to open app image " << options->app_image_ << " with error "
+ << error_msg;
+ }
+ // Open dex files for the image.
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ if (!runtime->GetClassLinker()->OpenImageDexFiles(space.get(), &dex_files, &error_msg)) {
+ LOG(ERROR) << "Failed to open app image dex files " << options->app_image_ << " with error "
+ << error_msg;
+ }
+ // Dump the actual image.
+ int result = DumpImage(space.get(), options, os);
+ if (result != EXIT_SUCCESS) {
+ return result;
+ }
+ // Fall through to dump the boot images.
+ }
+
+ gc::Heap* heap = runtime->GetHeap();
+ CHECK(heap->HasBootImageSpace()) << "No image spaces";
+ for (gc::space::ImageSpace* image_space : heap->GetBootImageSpaces()) {
+ int result = DumpImage(image_space, options, os);
+ if (result != EXIT_SUCCESS) {
+ return result;
+ }
}
return EXIT_SUCCESS;
}
@@ -2436,8 +2513,14 @@
static int DumpOat(Runtime* runtime, const char* oat_filename, OatDumperOptions* options,
std::ostream* os) {
std::string error_msg;
- OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false,
- nullptr, &error_msg);
+ OatFile* oat_file = OatFile::Open(oat_filename,
+ oat_filename,
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ nullptr,
+ &error_msg);
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
@@ -2450,10 +2533,16 @@
}
}
-static int SymbolizeOat(const char* oat_filename, std::string& output_name) {
+static int SymbolizeOat(const char* oat_filename, std::string& output_name, bool no_bits) {
std::string error_msg;
- OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, nullptr, nullptr, false,
- nullptr, &error_msg);
+ OatFile* oat_file = OatFile::Open(oat_filename,
+ oat_filename,
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ nullptr,
+ &error_msg);
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
@@ -2463,10 +2552,10 @@
// Try to produce an ELF file of the same type. This is finicky, as we have used 32-bit ELF
// files for 64-bit code in the past.
if (Is64BitInstructionSet(oat_file->GetOatHeader().GetInstructionSet())) {
- OatSymbolizer<ElfTypes64> oat_symbolizer(oat_file, output_name);
+ OatSymbolizer<ElfTypes64> oat_symbolizer(oat_file, output_name, no_bits);
result = oat_symbolizer.Symbolize();
} else {
- OatSymbolizer<ElfTypes32> oat_symbolizer(oat_file, output_name);
+ OatSymbolizer<ElfTypes32> oat_symbolizer(oat_file, output_name, no_bits);
result = oat_symbolizer.Symbolize();
}
if (!result) {
@@ -2509,6 +2598,8 @@
} else if (option.starts_with("--symbolize=")) {
oat_filename_ = option.substr(strlen("--symbolize=")).data();
symbolize_ = true;
+ } else if (option.starts_with("--only-keep-debug")) {
+ only_keep_debug_ = true;
} else if (option.starts_with("--class-filter=")) {
class_filter_ = option.substr(strlen("--class-filter=")).data();
} else if (option.starts_with("--method-filter=")) {
@@ -2524,6 +2615,10 @@
*error_msg = "Address conversion failed";
return kParseError;
}
+ } else if (option.starts_with("--app-image=")) {
+ app_image_ = option.substr(strlen("--app-image=")).data();
+ } else if (option.starts_with("--app-oat=")) {
+ app_oat_ = option.substr(strlen("--app-oat=")).data();
} else {
return kParseUnknownArgument;
}
@@ -2569,6 +2664,13 @@
"\n"
" --image=<file.art>: specifies an input image location.\n"
" Example: --image=/system/framework/boot.art\n"
+ "\n"
+ " --app-image=<file.art>: specifies an input app image. Must also have a specified\n"
+ " boot image and app oat file.\n"
+ " Example: --app-image=app.art\n"
+ "\n"
+ " --app-oat=<file.odex>: specifies an input app oat.\n"
+ " Example: --app-oat=app.odex\n"
"\n";
usage += Base::GetUsage();
@@ -2603,6 +2705,10 @@
" --symbolize=<file.oat>: output a copy of file.oat with elf symbols included.\n"
" Example: --symbolize=/system/framework/boot.oat\n"
"\n"
+ " --only-keep-debug<file.oat>: Modifies the behaviour of --symbolize so that\n"
+ " .rodata and .text sections are omitted in the output file to save space.\n"
+ " Example: --symbolize=/system/framework/boot.oat --only-keep-debug\n"
+ "\n"
" --class-filter=<class name>: only dumps classes that contain the filter.\n"
" Example: --class-filter=com.example.foo\n"
"\n"
@@ -2632,11 +2738,14 @@
bool dump_code_info_stack_maps_ = false;
bool disassemble_code_ = true;
bool symbolize_ = false;
+ bool only_keep_debug_ = false;
bool list_classes_ = false;
bool list_methods_ = false;
bool dump_header_only_ = false;
uint32_t addr2instr_ = 0;
const char* export_dex_location_ = nullptr;
+ const char* app_image_ = nullptr;
+ const char* app_oat_ = nullptr;
};
struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
@@ -2646,7 +2755,7 @@
// If we are only doing the oat file, disable absolute_addresses. Keep them for image dumping.
bool absolute_addresses = (args_->oat_filename_ == nullptr);
- oat_dumper_options_ = std::unique_ptr<OatDumperOptions>(new OatDumperOptions(
+ oat_dumper_options_.reset(new OatDumperOptions(
args_->dump_raw_mapping_table_,
args_->dump_raw_gc_map_,
args_->dump_vmap_,
@@ -2659,6 +2768,8 @@
args_->list_methods_,
args_->dump_header_only_,
args_->export_dex_location_,
+ args_->app_image_,
+ args_->app_oat_,
args_->addr2instr_));
return (args_->boot_image_location_ != nullptr || args_->image_location_ != nullptr) &&
@@ -2672,7 +2783,12 @@
MemMap::Init();
if (args_->symbolize_) {
- return SymbolizeOat(args_->oat_filename_, args_->output_name_) == EXIT_SUCCESS;
+ // ELF has a special kind of section, SHT_NOBITS, which allows us to create
+ // sections that exist but whose data is omitted from the ELF file to save space.
+ // This is what "strip --only-keep-debug" does when it creates a separate ELF
+ // file with only debug data. We use it similarly to exclude .rodata and .text.
+ bool no_bits = args_->only_keep_debug_;
+ return SymbolizeOat(args_->oat_filename_, args_->output_name_, no_bits) == EXIT_SUCCESS;
} else {
return DumpOat(nullptr,
args_->oat_filename_,
@@ -2691,7 +2807,7 @@
args_->os_) == EXIT_SUCCESS;
}
- return DumpImage(runtime, oat_dumper_options_.get(), args_->os_) == EXIT_SUCCESS;
+ return DumpImages(runtime, oat_dumper_options_.get(), args_->os_) == EXIT_SUCCESS;
}
std::unique_ptr<OatDumperOptions> oat_dumper_options_;
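
Illustration (not part of the patch): the --only-keep-debug mode relies on SHT_NOBITS sections, whose headers describe an address range while no bytes are stored in the file. A standalone sketch of such a section header using the standard <elf.h> types; field values here are placeholders, not oatdump's actual layout.

#include <elf.h>
#include <cstring>

Elf32_Shdr MakeNoBitsSection(Elf32_Addr addr, Elf32_Word size) {
  Elf32_Shdr shdr;
  std::memset(&shdr, 0, sizeof(shdr));
  shdr.sh_type = SHT_NOBITS;  // no data stored in the file
  shdr.sh_flags = SHF_ALLOC;  // still occupies an address range when loaded
  shdr.sh_addr = addr;
  shdr.sh_size = size;        // logical size only; contributes no file bytes
  return shdr;
}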
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 84660a3..f70e696 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -140,7 +140,6 @@
native/java_lang_Class.cc \
native/java_lang_DexCache.cc \
native/java_lang_Object.cc \
- native/java_lang_Runtime.cc \
native/java_lang_String.cc \
native/java_lang_StringFactory.cc \
native/java_lang_System.cc \
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index ebe89bb..8541210 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -473,39 +473,40 @@
}
template <typename Visitor>
-inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor) {
+inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor,
+ size_t pointer_size) {
mirror::Class* old_class = GetDeclaringClassUnchecked<kWithoutReadBarrier>();
mirror::Class* new_class = visitor(old_class);
if (old_class != new_class) {
SetDeclaringClass(new_class);
}
- ArtMethod** old_methods = GetDexCacheResolvedMethods(sizeof(void*));
+ ArtMethod** old_methods = GetDexCacheResolvedMethods(pointer_size);
ArtMethod** new_methods = visitor(old_methods);
if (old_methods != new_methods) {
- SetDexCacheResolvedMethods(new_methods, sizeof(void*));
+ SetDexCacheResolvedMethods(new_methods, pointer_size);
}
- GcRoot<mirror::Class>* old_types = GetDexCacheResolvedTypes(sizeof(void*));
+ GcRoot<mirror::Class>* old_types = GetDexCacheResolvedTypes(pointer_size);
GcRoot<mirror::Class>* new_types = visitor(old_types);
if (old_types != new_types) {
- SetDexCacheResolvedTypes(new_types, sizeof(void*));
+ SetDexCacheResolvedTypes(new_types, pointer_size);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor) {
+inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, size_t pointer_size) {
if (IsNative<kReadBarrierOption>()) {
- const void* old_native_code = GetEntryPointFromJni();
+ const void* old_native_code = GetEntryPointFromJniPtrSize(pointer_size);
const void* new_native_code = visitor(old_native_code);
if (old_native_code != new_native_code) {
- SetEntryPointFromJni(new_native_code);
+ SetEntryPointFromJniPtrSize(new_native_code, pointer_size);
}
} else {
- DCHECK(GetEntryPointFromJni() == nullptr);
+ DCHECK(GetEntryPointFromJniPtrSize(pointer_size) == nullptr);
}
- const void* old_code = GetEntryPointFromQuickCompiledCode();
+ const void* old_code = GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
const void* new_code = visitor(old_code);
if (old_code != new_code) {
- SetEntryPointFromQuickCompiledCode(new_code);
+ SetEntryPointFromQuickCompiledCodePtrSize(new_code, pointer_size);
}
}
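
Illustration (not part of the patch): UpdateObjectsForImageRelocation and UpdateEntrypoints accept any callable that maps an old pointer to its relocated address; the shape below is inferred from the calls above. RelocateVisitor is a toy stand-in that shifts pointers inside one source range by a fixed delta.

#include <cstdint>

class RelocateVisitor {
 public:
  RelocateVisitor(uintptr_t src_begin, uintptr_t src_end, intptr_t delta)
      : src_begin_(src_begin), src_end_(src_end), delta_(delta) {}

  template <typename T>
  T* operator()(T* ptr) const {
    uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
    if (raw >= src_begin_ && raw < src_end_) {
      return reinterpret_cast<T*>(raw + delta_);  // inside the moved range
    }
    return ptr;  // outside the moved range: leave untouched
  }

 private:
  const uintptr_t src_begin_;
  const uintptr_t src_end_;
  const intptr_t delta_;
};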
diff --git a/runtime/art_method.h b/runtime/art_method.h
index ec00a7b..5ca362c 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -490,12 +490,12 @@
// Update heap objects and non-entrypoint pointers by the passed in visitor for image relocation.
// Does not use read barrier.
template <typename Visitor>
- ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor)
+ ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Update entry points by passing them through the visitor.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
- ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor);
+ ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, size_t pointer_size);
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
diff --git a/runtime/base/length_prefixed_array.h b/runtime/base/length_prefixed_array.h
index d632871..8060263 100644
--- a/runtime/base/length_prefixed_array.h
+++ b/runtime/base/length_prefixed_array.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_BASE_LENGTH_PREFIXED_ARRAY_H_
#include <stddef.h> // for offsetof()
+#include <string.h> // for memset()
#include "stride_iterator.h"
#include "base/bit_utils.h"
@@ -84,6 +85,13 @@
size_ = dchecked_integral_cast<uint32_t>(length);
}
+ // Clear the potentially uninitialized padding between the size_ and actual data.
+ void ClearPadding(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ size_t gap_offset = offsetof(LengthPrefixedArray<T>, data);
+ size_t gap_size = OffsetOfElement(0, element_size, alignment) - gap_offset;
+ memset(reinterpret_cast<uint8_t*>(this) + gap_offset, 0, gap_size);
+ }
+
private:
T& AtUnchecked(size_t index, size_t element_size, size_t alignment) {
return *reinterpret_cast<T*>(
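
Illustration (not part of the patch): ClearPadding exists because the 32-bit length prefix can be followed by elements with larger alignment, leaving a gap that a memcpy of an old array would otherwise leave uninitialized. A standalone illustration of that gap; Prefixed and Elem are invented for the example.

#include <cstddef>
#include <cstdint>

struct alignas(8) Elem { uint64_t a; };  // e.g. a 64-bit-aligned ArtMethod-like element

struct Prefixed {
  uint32_t size_;
  Elem data_[1];  // first element starts at the next 8-byte boundary
};

static_assert(offsetof(Prefixed, data_) - sizeof(uint32_t) == 4,
              "4 bytes of padding sit between the length and the data");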
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d51a1f7..52beb15 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1462,6 +1462,64 @@
const bool forward_strings_;
};
+static std::unique_ptr<const DexFile> OpenOatDexFile(const OatFile* oat_file,
+ const char* location,
+ std::string* error_msg)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(error_msg != nullptr);
+ std::unique_ptr<const DexFile> dex_file;
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr);
+ if (oat_dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed finding oat dex file for %s %s",
+ oat_file->GetLocation().c_str(),
+ location);
+ return std::unique_ptr<const DexFile>();
+ }
+ std::string inner_error_msg;
+ dex_file = oat_dex_file->OpenDexFile(&inner_error_msg);
+ if (dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open dex file %s from within oat file %s error '%s'",
+ location,
+ oat_file->GetLocation().c_str(),
+ inner_error_msg.c_str());
+ return std::unique_ptr<const DexFile>();
+ }
+
+ if (dex_file->GetLocationChecksum() != oat_dex_file->GetDexFileLocationChecksum()) {
+ *error_msg = StringPrintf("Checksums do not match for %s: %x vs %x",
+ location,
+ dex_file->GetLocationChecksum(),
+ oat_dex_file->GetDexFileLocationChecksum());
+ return std::unique_ptr<const DexFile>();
+ }
+ return dex_file;
+}
+
+bool ClassLinker::OpenImageDexFiles(gc::space::ImageSpace* space,
+ std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
+ std::string* error_msg) {
+ ScopedAssertNoThreadSuspension nts(Thread::Current(), __FUNCTION__);
+ const ImageHeader& header = space->GetImageHeader();
+ mirror::Object* dex_caches_object = header.GetImageRoot(ImageHeader::kDexCaches);
+ DCHECK(dex_caches_object != nullptr);
+ mirror::ObjectArray<mirror::DexCache>* dex_caches =
+ dex_caches_object->AsObjectArray<mirror::DexCache>();
+ const OatFile* oat_file = space->GetOatFile();
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ std::string dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
+ std::unique_ptr<const DexFile> dex_file = OpenOatDexFile(oat_file,
+ dex_file_location.c_str(),
+ error_msg);
+ if (dex_file == nullptr) {
+ return false;
+ }
+ dex_cache->SetDexFile(dex_file.get());
+ out_dex_files->push_back(std::move(dex_file));
+ }
+ return true;
+}
+
bool ClassLinker::AddImageSpace(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1528,29 +1586,10 @@
dex_location_path = dex_location_path.substr(0, pos + 1); // Keep trailing '/'
dex_file_location = dex_location_path + dex_file_location;
}
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file_location.c_str(),
- nullptr);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("Failed finding oat dex file for %s %s",
- oat_file->GetLocation().c_str(),
- dex_file_location.c_str());
- return false;
- }
- std::string inner_error_msg;
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&inner_error_msg);
+ std::unique_ptr<const DexFile> dex_file = OpenOatDexFile(oat_file,
+ dex_file_location.c_str(),
+ error_msg);
if (dex_file == nullptr) {
- *error_msg = StringPrintf("Failed to open dex file %s from within oat file %s error '%s'",
- dex_file_location.c_str(),
- oat_file->GetLocation().c_str(),
- inner_error_msg.c_str());
- return false;
- }
-
- if (dex_file->GetLocationChecksum() != oat_dex_file->GetDexFileLocationChecksum()) {
- *error_msg = StringPrintf("Checksums do not match for %s: %x vs %x",
- dex_file_location.c_str(),
- dex_file->GetLocationChecksum(),
- oat_dex_file->GetDexFileLocationChecksum());
return false;
}
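
Illustration (not part of the patch): the OpenOatDexFile helper factors out one invariant shared by AddImageSpace and the new OpenImageDexFiles: the dex file's location checksum must match the checksum the oat file recorded for it. A standalone sketch of that comparison; ChecksumsMatch is an illustrative name, not ART's API.

#include <cstdint>
#include <cstdio>
#include <string>

bool ChecksumsMatch(uint32_t dex_checksum, uint32_t oat_recorded, std::string* error_msg) {
  if (dex_checksum != oat_recorded) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "Checksums do not match: %x vs %x",
                  dex_checksum, oat_recorded);
    *error_msg = buf;
    return false;
  }
  return true;
}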
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 492a228..36ed820 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -149,6 +149,12 @@
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool OpenImageDexFiles(gc::space::ImageSpace* space,
+ std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
+ std::string* error_msg)
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
mirror::Class* FindClass(Thread* self,
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index ddf2749..9c9b8c5 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -230,7 +230,10 @@
return true;
}
-bool DexFileVerifier::CheckValidOffsetAndSize(uint32_t offset, uint32_t size, const char* label) {
+bool DexFileVerifier::CheckValidOffsetAndSize(uint32_t offset,
+ uint32_t size,
+ size_t alignment,
+ const char* label) {
if (size == 0) {
if (offset != 0) {
ErrorStringPrintf("Offset(%d) should be zero when size is zero for %s.", offset, label);
@@ -241,6 +244,10 @@
ErrorStringPrintf("Offset(%d) should be within file size(%zu) for %s.", offset, size_, label);
return false;
}
+ if (alignment != 0 && !IsAlignedParam(offset, alignment)) {
+ ErrorStringPrintf("Offset(%d) should be aligned by %zu for %s.", offset, alignment, label);
+ return false;
+ }
return true;
}
@@ -275,16 +282,43 @@
// Check that all offsets are inside the file.
bool result =
- CheckValidOffsetAndSize(header_->link_off_, header_->link_size_, "link") &&
- CheckValidOffsetAndSize(header_->map_off_, header_->map_off_, "map") &&
- CheckValidOffsetAndSize(header_->string_ids_off_, header_->string_ids_size_, "string-ids") &&
- CheckValidOffsetAndSize(header_->type_ids_off_, header_->type_ids_size_, "type-ids") &&
- CheckValidOffsetAndSize(header_->proto_ids_off_, header_->proto_ids_size_, "proto-ids") &&
- CheckValidOffsetAndSize(header_->field_ids_off_, header_->field_ids_size_, "field-ids") &&
- CheckValidOffsetAndSize(header_->method_ids_off_, header_->method_ids_size_, "method-ids") &&
- CheckValidOffsetAndSize(header_->class_defs_off_, header_->class_defs_size_, "class-defs") &&
- CheckValidOffsetAndSize(header_->data_off_, header_->data_size_, "data");
-
+ CheckValidOffsetAndSize(header_->link_off_,
+ header_->link_size_,
+ 0 /* unaligned */,
+ "link") &&
+ CheckValidOffsetAndSize(header_->map_off_,
+ header_->map_off_,
+ 4,
+ "map") &&
+ CheckValidOffsetAndSize(header_->string_ids_off_,
+ header_->string_ids_size_,
+ 4,
+ "string-ids") &&
+ CheckValidOffsetAndSize(header_->type_ids_off_,
+ header_->type_ids_size_,
+ 4,
+ "type-ids") &&
+ CheckValidOffsetAndSize(header_->proto_ids_off_,
+ header_->proto_ids_size_,
+ 4,
+ "proto-ids") &&
+ CheckValidOffsetAndSize(header_->field_ids_off_,
+ header_->field_ids_size_,
+ 4,
+ "field-ids") &&
+ CheckValidOffsetAndSize(header_->method_ids_off_,
+ header_->method_ids_size_,
+ 4,
+ "method-ids") &&
+ CheckValidOffsetAndSize(header_->class_defs_off_,
+ header_->class_defs_size_,
+ 4,
+ "class-defs") &&
+ CheckValidOffsetAndSize(header_->data_off_,
+ header_->data_size_,
+ 0, // Unaligned, spec doesn't talk about it, even though size
+ // is supposed to be a multiple of 4.
+ "data");
return result;
}
@@ -1965,6 +1999,11 @@
// Check that references in annotations_directory_item are to right class.
if (item->annotations_off_ != 0) {
+ // annotations_off_ is supposed to be aligned by 4.
+ if (!IsAlignedParam(item->annotations_off_, 4)) {
+ ErrorStringPrintf("Invalid annotations_off_, not aligned by 4");
+ return false;
+ }
const uint8_t* data = begin_ + item->annotations_off_;
bool success;
uint16_t annotations_definer = FindFirstAnnotationsDirectoryDefiner(data, &success);
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index ddfeea2..be0e6d8 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -48,7 +48,7 @@
bool CheckList(size_t element_size, const char* label, const uint8_t* *ptr);
// Checks whether the offset is zero (when size is zero) or that the offset falls within the area
// claimed by the file.
- bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, const char* label);
+ bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, size_t alignment, const char* label);
bool CheckIndex(uint32_t field, uint32_t limit, const char* label);
bool CheckHeader();
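
Illustration (not part of the patch): the extra alignment parameter reduces to a power-of-two alignment test, with 0 meaning "no requirement" as at the link and data call sites above. A standalone equivalent of the predicate; OffsetIsAligned is a name made up for this sketch.

#include <cstddef>
#include <cstdint>

bool OffsetIsAligned(uint32_t offset, size_t alignment) {
  if (alignment == 0) {
    return true;  // no alignment requirement for this section
  }
  return (offset & (alignment - 1)) == 0;  // valid for power-of-two alignments
}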
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 558a6ed..44cf2ee 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -1253,4 +1253,63 @@
"DBG_START_LOCAL type_idx");
}
+TEST_F(DexFileVerifierTest, SectionAlignment) {
+ {
+ // The input dex file should be good before modification. Any file is fine, as long as it
+ // uses all sections.
+ ScratchFile tmp;
+ std::string error_msg;
+ std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kGoodTestDex,
+ tmp.GetFilename().c_str(),
+ &error_msg));
+ ASSERT_TRUE(raw.get() != nullptr) << error_msg;
+ }
+
+ // Modify all section offsets to be unaligned.
+ constexpr size_t kSections = 7;
+ for (size_t i = 0; i < kSections; ++i) {
+ VerifyModification(
+ kGoodTestDex,
+ "section_align",
+ [&](DexFile* dex_file) {
+ DexFile::Header* header = const_cast<DexFile::Header*>(
+ reinterpret_cast<const DexFile::Header*>(dex_file->Begin()));
+ uint32_t* off_ptr;
+ switch (i) {
+ case 0:
+ off_ptr = &header->map_off_;
+ break;
+ case 1:
+ off_ptr = &header->string_ids_off_;
+ break;
+ case 2:
+ off_ptr = &header->type_ids_off_;
+ break;
+ case 3:
+ off_ptr = &header->proto_ids_off_;
+ break;
+ case 4:
+ off_ptr = &header->field_ids_off_;
+ break;
+ case 5:
+ off_ptr = &header->method_ids_off_;
+ break;
+ case 6:
+ off_ptr = &header->class_defs_off_;
+ break;
+
+ static_assert(kSections == 7, "kSections is wrong");
+ default:
+ LOG(FATAL) << "Unexpected section";
+ UNREACHABLE();
+ }
+ ASSERT_TRUE(off_ptr != nullptr);
+ ASSERT_NE(*off_ptr, 0U) << i; // Should already contain a value (in use).
+ (*off_ptr)++; // Add one, which should misalign it (all the sections
+ // above are aligned by 4).
+ },
+ "should be aligned by 4 for");
+ }
+}
+
} // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 52da28b..3b4b88d 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -57,9 +57,12 @@
}
template <typename ElfTypes>
-ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(
- File* file, bool writable, bool program_header_only,
- std::string* error_msg, uint8_t* requested_base) {
+ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file,
+ bool writable,
+ bool program_header_only,
+ bool low_4gb,
+ std::string* error_msg,
+ uint8_t* requested_base) {
std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
(file, writable, program_header_only, requested_base));
int prot;
@@ -71,26 +74,29 @@
prot = PROT_READ;
flags = MAP_PRIVATE;
}
- if (!elf_file->Setup(prot, flags, error_msg)) {
+ if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) {
return nullptr;
}
return elf_file.release();
}
template <typename ElfTypes>
-ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(
- File* file, int prot, int flags, std::string* error_msg) {
+ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file,
+ int prot,
+ int flags,
+ bool low_4gb,
+ std::string* error_msg) {
std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
(file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
/*requested_base*/nullptr));
- if (!elf_file->Setup(prot, flags, error_msg)) {
+ if (!elf_file->Setup(prot, flags, low_4gb, error_msg)) {
return nullptr;
}
return elf_file.release();
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, std::string* error_msg) {
+bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, bool low_4gb, std::string* error_msg) {
int64_t temp_file_length = file_->GetLength();
if (temp_file_length < 0) {
errno = -temp_file_length;
@@ -114,7 +120,7 @@
flags,
file_->Fd(),
0,
- /*low4_gb*/false,
+ low_4gb,
file_->GetPath().c_str(),
error_msg),
error_msg)) {
@@ -133,7 +139,7 @@
flags,
file_->Fd(),
0,
- /*low4_gb*/false,
+ low_4gb,
file_->GetPath().c_str(),
error_msg),
error_msg)) {
@@ -147,7 +153,7 @@
flags,
file_->Fd(),
0,
- /*low4_gb*/false,
+ low_4gb,
file_->GetPath().c_str(),
error_msg),
error_msg)) {
@@ -1058,7 +1064,7 @@
}
template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::Load(bool executable, std::string* error_msg) {
+bool ElfFileImpl<ElfTypes>::Load(bool executable, bool low_4gb, std::string* error_msg) {
CHECK(program_header_only_) << file_->GetPath();
if (executable) {
@@ -1124,7 +1130,10 @@
}
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
reserve_base_override,
- loaded_size, PROT_NONE, false, false,
+ loaded_size,
+ PROT_NONE,
+ low_4gb,
+ false,
error_msg));
if (reserve.get() == nullptr) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
@@ -1656,7 +1665,11 @@
CHECK_NE(elf32_.get() == nullptr, elf64_.get() == nullptr);
}
-ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only, std::string* error_msg,
+ElfFile* ElfFile::Open(File* file,
+ bool writable,
+ bool program_header_only,
+ bool low_4gb,
+ std::string* error_msg,
uint8_t* requested_base) {
if (file->GetLength() < EI_NIDENT) {
*error_msg = StringPrintf("File %s is too short to be a valid ELF file",
@@ -1668,7 +1681,7 @@
MAP_PRIVATE,
file->Fd(),
0,
- /*low4_gb*/false,
+ low_4gb,
file->GetPath().c_str(),
error_msg));
if (map == nullptr && map->Size() != EI_NIDENT) {
@@ -1676,14 +1689,22 @@
}
uint8_t* header = map->Begin();
if (header[EI_CLASS] == ELFCLASS64) {
- ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, writable, program_header_only,
- error_msg, requested_base);
+ ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
+ writable,
+ program_header_only,
+ low_4gb,
+ error_msg,
+ requested_base);
if (elf_file_impl == nullptr)
return nullptr;
return new ElfFile(elf_file_impl);
} else if (header[EI_CLASS] == ELFCLASS32) {
- ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file, writable, program_header_only,
- error_msg, requested_base);
+ ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file,
+ writable,
+ program_header_only,
+ low_4gb,
+ error_msg,
+ requested_base);
if (elf_file_impl == nullptr) {
return nullptr;
}
@@ -1698,6 +1719,8 @@
}
ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg) {
+ // low_4gb support not required for this path.
+ constexpr bool low_4gb = false;
if (file->GetLength() < EI_NIDENT) {
*error_msg = StringPrintf("File %s is too short to be a valid ELF file",
file->GetPath().c_str());
@@ -1708,7 +1731,7 @@
MAP_PRIVATE,
file->Fd(),
0,
- /*low4_gb*/false,
+ low_4gb,
file->GetPath().c_str(),
error_msg));
if (map == nullptr || map->Size() != EI_NIDENT) {
@@ -1716,13 +1739,21 @@
}
uint8_t* header = map->Begin();
if (header[EI_CLASS] == ELFCLASS64) {
- ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file, mmap_prot, mmap_flags, error_msg);
+ ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
+ mmap_prot,
+ mmap_flags,
+ low_4gb,
+ error_msg);
if (elf_file_impl == nullptr) {
return nullptr;
}
return new ElfFile(elf_file_impl);
} else if (header[EI_CLASS] == ELFCLASS32) {
- ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file, mmap_prot, mmap_flags, error_msg);
+ ElfFileImpl32* elf_file_impl = ElfFileImpl32::Open(file,
+ mmap_prot,
+ mmap_flags,
+ low_4gb,
+ error_msg);
if (elf_file_impl == nullptr) {
return nullptr;
}
@@ -1744,8 +1775,8 @@
return elf32_->func(__VA_ARGS__); \
}
-bool ElfFile::Load(bool executable, std::string* error_msg) {
- DELEGATE_TO_IMPL(Load, executable, error_msg);
+bool ElfFile::Load(bool executable, bool low_4gb, std::string* error_msg) {
+ DELEGATE_TO_IMPL(Load, executable, low_4gb, error_msg);
}
const uint8_t* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
@@ -1810,7 +1841,7 @@
}
bool ElfFile::Strip(File* file, std::string* error_msg) {
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, error_msg));
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, error_msg));
if (elf_file.get() == nullptr) {
return false;
}
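A quick usage sketch of the updated ElfFile entry points as they look after this change. The oat path and error handling here are illustrative only, not taken from this patch:

  std::string error_msg;
  // Hypothetical file; any File* opened for reading works the same way.
  std::unique_ptr<File> file(OS::OpenFileForReading("/data/dalvik-cache/arm64/example.oat"));
  std::unique_ptr<ElfFile> elf(ElfFile::Open(file.get(),
                                             /*writable*/false,
                                             /*program_header_only*/true,
                                             /*low_4gb*/true,  // request mappings below 4GiB
                                             &error_msg));
  if (elf == nullptr || !elf->Load(/*executable*/false, /*low_4gb*/true, &error_msg)) {
    LOG(ERROR) << "Failed to load ELF file: " << error_msg;
  }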
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 1188c97..b5229b5 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -38,15 +38,22 @@
// ELFObjectFile.
class ElfFile {
public:
- static ElfFile* Open(File* file, bool writable, bool program_header_only, std::string* error_msg,
+ static ElfFile* Open(File* file,
+ bool writable,
+ bool program_header_only,
+ bool low_4gb,
+ std::string* error_msg,
uint8_t* requested_base = nullptr); // TODO: move arg to before error_msg.
// Open with specific mmap flags. Always maps in the whole file, not just the
// program header sections.
- static ElfFile* Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg);
+ static ElfFile* Open(File* file,
+ int mmap_prot,
+ int mmap_flags,
+ std::string* error_msg);
~ElfFile();
// Load segments into memory based on PT_LOAD program headers
- bool Load(bool executable, std::string* error_msg);
+ bool Load(bool executable, bool low_4gb, std::string* error_msg);
const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name) const;
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 2af31dc..1cdbedc 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -48,9 +48,17 @@
using Elf_Phdr = typename ElfTypes::Phdr;
using Elf_Dyn = typename ElfTypes::Dyn;
- static ElfFileImpl* Open(File* file, bool writable, bool program_header_only,
- std::string* error_msg, uint8_t* requested_base = nullptr);
- static ElfFileImpl* Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg);
+ static ElfFileImpl* Open(File* file,
+ bool writable,
+ bool program_header_only,
+ bool low_4gb,
+ std::string* error_msg,
+ uint8_t* requested_base = nullptr);
+ static ElfFileImpl* Open(File* file,
+ int mmap_prot,
+ int mmap_flags,
+ bool low_4gb,
+ std::string* error_msg);
~ElfFileImpl();
const File& GetFile() const {
@@ -111,7 +119,7 @@
// Load segments into memory based on PT_LOAD program headers.
// executable is true at run time, false at compile time.
- bool Load(bool executable, std::string* error_msg);
+ bool Load(bool executable, bool low_4gb, std::string* error_msg);
bool Fixup(Elf_Addr base_address);
bool FixupDynamic(Elf_Addr base_address);
@@ -129,7 +137,7 @@
private:
ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base);
- bool Setup(int prot, int flags, std::string* error_msg);
+ bool Setup(int prot, int flags, bool low_4gb, std::string* error_msg);
bool SetMap(MemMap* map, std::string* error_msg);
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 7e7d904..3e6b453 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -173,7 +173,10 @@
if (klass == nullptr) {
return nullptr;
}
- return klass->Alloc<kInstrumented>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ // CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
+ return klass->Alloc</*kInstrumented*/true>(
+ self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
DCHECK(klass != nullptr);
return klass->Alloc<kInstrumented>(self, allocator_type);
@@ -194,7 +197,9 @@
}
gc::Heap* heap = Runtime::Current()->GetHeap();
// Pass in false since the object cannot be finalizable.
- return klass->Alloc<kInstrumented, false>(self, heap->GetCurrentAllocator());
+ // CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
+ // instrumented.
+ return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator());
}
// Pass in false since the object cannot be finalizable.
return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -265,9 +270,12 @@
return nullptr;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
- return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
- klass->GetComponentSizeShift(),
- heap->GetCurrentAllocator());
+ // CheckArrayAlloc can cause thread suspension which means we may now be instrumented.
+ return mirror::Array::Alloc</*kInstrumented*/true>(self,
+ klass,
+ component_count,
+ klass->GetComponentSizeShift(),
+ heap->GetCurrentAllocator());
}
return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
klass->GetComponentSizeShift(), allocator_type);
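The comments added above capture the invariant driving these changes: CheckObjectAlloc, CheckClassInitializedForObjectAlloc and CheckArrayAlloc are suspend points, so instrumentation may have been enabled after the caller captured kInstrumented, and the slow path must therefore dispatch with the flag forced to true. A self-contained toy of that dispatch pattern (names are made up for illustration; this is not ART code):

  #include <cstdio>

  template <bool kInstrumented>
  static void AllocImpl() {
    std::printf("allocating, instrumented=%d\n", kInstrumented);
  }

  // If the slow path may have flipped the instrumentation state (a suspend point in ART),
  // re-dispatch with the flag forced to true instead of trusting the template argument
  // that was captured before the suspend point.
  template <bool kInstrumented>
  static void Alloc(bool took_slow_path) {
    if (took_slow_path) {
      AllocImpl</*kInstrumented*/true>();
      return;
    }
    AllocImpl<kInstrumented>();
  }

  int main() {
    Alloc</*kInstrumented*/false>(/*took_slow_path*/true);   // prints instrumented=1
    Alloc</*kInstrumented*/false>(/*took_slow_path*/false);  // prints instrumented=0
    return 0;
  }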
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 4672483..7319045 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -92,7 +92,7 @@
}
AllocRecordObjectMap::~AllocRecordObjectMap() {
- STLDeleteValues(&entries_);
+ Clear();
}
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
@@ -223,7 +223,11 @@
if (heap->IsAllocTrackingEnabled()) {
return; // Already enabled, bail.
}
- AllocRecordObjectMap* records = new AllocRecordObjectMap();
+ AllocRecordObjectMap* records = heap->GetAllocationRecords();
+ if (records == nullptr) {
+ records = new AllocRecordObjectMap;
+ heap->SetAllocationRecords(records);
+ }
CHECK(records != nullptr);
records->SetProperties();
std::string self_name;
@@ -237,7 +241,6 @@
LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
<< records->max_stack_depth_ << " frames, taking up to "
<< PrettySize(sz * records->alloc_record_max_) << ")";
- heap->SetAllocationRecords(records);
}
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
{
@@ -247,7 +250,6 @@
} else {
// Delete outside of the critical section to avoid possible lock violations like the runtime
// shutdown lock.
- std::unique_ptr<AllocRecordObjectMap> map;
{
MutexLock mu(self, *Locks::alloc_tracker_lock_);
if (!heap->IsAllocTrackingEnabled()) {
@@ -255,7 +257,8 @@
}
heap->SetAllocTrackingEnabled(false);
LOG(INFO) << "Disabling alloc tracker";
- map = heap->ReleaseAllocationRecords();
+ AllocRecordObjectMap* records = heap->GetAllocationRecords();
+ records->Clear();
}
// If an allocation comes in before we uninstrument, we will safely drop it on the floor.
Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
@@ -303,5 +306,10 @@
DCHECK_LE(records->Size(), records->alloc_record_max_);
}
+void AllocRecordObjectMap::Clear() {
+ STLDeleteValues(&entries_);
+ entries_.clear();
+}
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index ffdfd31..18cce4d 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -306,6 +306,8 @@
return entries_.rend();
}
+ void Clear() REQUIRES(Locks::alloc_tracker_lock_);
+
private:
static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
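Together with the heap.h/heap.cc hunks below, this changes the tracker's lifecycle: the AllocRecordObjectMap is created once, kept alive on the Heap, and disabling tracking only clears its entries under the alloc tracker lock. A rough sketch of the resulting enable/disable flow, assuming the accessors shown in this change and omitting the property setup, instrumentation and early-out checks that the real SetAllocTrackingEnabled performs:

  void ToggleAllocTracking(bool enable) {
    Thread* self = Thread::Current();
    gc::Heap* heap = Runtime::Current()->GetHeap();
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    if (enable) {
      if (heap->GetAllocationRecords() == nullptr) {
        heap->SetAllocationRecords(new gc::AllocRecordObjectMap);  // Created once, reused afterwards.
      }
      heap->SetAllocTrackingEnabled(true);
    } else {
      heap->SetAllocTrackingEnabled(false);
      heap->GetAllocationRecords()->Clear();  // Drop the entries, keep the map itself.
    }
  }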
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index d7023d8..59fd4a6 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -174,9 +174,6 @@
} else {
DCHECK(!Runtime::Current()->HasStatsEnabled());
}
- if (AllocatorHasAllocationStack(allocator)) {
- PushOnAllocationStack(self, &obj);
- }
if (kInstrumented) {
if (IsAllocTrackingEnabled()) {
// Use obj->GetClass() instead of klass, because PushOnAllocationStack() could move klass
@@ -185,6 +182,9 @@
} else {
DCHECK(!IsAllocTrackingEnabled());
}
+ if (AllocatorHasAllocationStack(allocator)) {
+ PushOnAllocationStack(self, &obj);
+ }
if (kInstrumented) {
if (gc_stress_mode_) {
CheckGcStressMode(self, &obj);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2e5b599..f4fccee 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3940,10 +3940,6 @@
allocation_records_.reset(records);
}
-std::unique_ptr<AllocRecordObjectMap> Heap::ReleaseAllocationRecords() {
- return std::move(allocation_records_);
-}
-
void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
if (IsAllocTrackingEnabled()) {
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e0a53a0..9eda422 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -766,10 +766,6 @@
return allocation_records_.get();
}
- // Release ownership of the allocation records.
- std::unique_ptr<AllocRecordObjectMap> ReleaseAllocationRecords()
- REQUIRES(Locks::alloc_tracker_lock_);
-
void SetAllocationRecords(AllocRecordObjectMap* records)
REQUIRES(Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9ecd391..5ff1cb7 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -16,7 +16,6 @@
#include "image_space.h"
-#include <dirent.h>
#include <lz4.h>
#include <random>
#include <sys/statvfs.h>
@@ -29,9 +28,9 @@
#include "base/scoped_flock.h"
#include "base/systrace.h"
#include "base/time_utils.h"
-#include "base/unix_file/fd_file.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "image-inl.h"
+#include "image_space_fs.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "oat_file.h"
@@ -79,105 +78,6 @@
return r;
}
-// We are relocating or generating the core image. We should get rid of everything. It is all
-// out-of-date. We also don't really care if this fails since it is just a convenience.
-// Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
-// Note this should only be used during first boot.
-static void RealPruneDalvikCache(const std::string& cache_dir_path);
-
-static void PruneDalvikCache(InstructionSet isa) {
- CHECK_NE(isa, kNone);
- // Prune the base /data/dalvik-cache.
- RealPruneDalvikCache(GetDalvikCacheOrDie(".", false));
- // Prune /data/dalvik-cache/<isa>.
- RealPruneDalvikCache(GetDalvikCacheOrDie(GetInstructionSetString(isa), false));
-}
-
-static void RealPruneDalvikCache(const std::string& cache_dir_path) {
- if (!OS::DirectoryExists(cache_dir_path.c_str())) {
- return;
- }
- DIR* cache_dir = opendir(cache_dir_path.c_str());
- if (cache_dir == nullptr) {
- PLOG(WARNING) << "Unable to open " << cache_dir_path << " to delete it's contents";
- return;
- }
-
- for (struct dirent* de = readdir(cache_dir); de != nullptr; de = readdir(cache_dir)) {
- const char* name = de->d_name;
- if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
- continue;
- }
- // We only want to delete regular files and symbolic links.
- if (de->d_type != DT_REG && de->d_type != DT_LNK) {
- if (de->d_type != DT_DIR) {
- // We do expect some directories (namely the <isa> for pruning the base dalvik-cache).
- LOG(WARNING) << "Unexpected file type of " << std::hex << de->d_type << " encountered.";
- }
- continue;
- }
- std::string cache_file(cache_dir_path);
- cache_file += '/';
- cache_file += name;
- if (TEMP_FAILURE_RETRY(unlink(cache_file.c_str())) != 0) {
- PLOG(ERROR) << "Unable to unlink " << cache_file;
- continue;
- }
- }
- CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(cache_dir))) << "Unable to close directory.";
-}
-
-// We write out an empty file to the zygote's ISA specific cache dir at the start of
-// every zygote boot and delete it when the boot completes. If we find a file already
-// present, it usually means the boot didn't complete. We wipe the entire dalvik
-// cache if that's the case.
-static void MarkZygoteStart(const InstructionSet isa, const uint32_t max_failed_boots) {
- const std::string isa_subdir = GetDalvikCacheOrDie(GetInstructionSetString(isa), false);
- const std::string boot_marker = isa_subdir + "/.booting";
- const char* file_name = boot_marker.c_str();
-
- uint32_t num_failed_boots = 0;
- std::unique_ptr<File> file(OS::OpenFileReadWrite(file_name));
- if (file.get() == nullptr) {
- file.reset(OS::CreateEmptyFile(file_name));
-
- if (file.get() == nullptr) {
- PLOG(WARNING) << "Failed to create boot marker.";
- return;
- }
- } else {
- if (!file->ReadFully(&num_failed_boots, sizeof(num_failed_boots))) {
- PLOG(WARNING) << "Failed to read boot marker.";
- file->Erase();
- return;
- }
- }
-
- if (max_failed_boots != 0 && num_failed_boots > max_failed_boots) {
- LOG(WARNING) << "Incomplete boot detected. Pruning dalvik cache";
- RealPruneDalvikCache(isa_subdir);
- }
-
- ++num_failed_boots;
- VLOG(startup) << "Number of failed boots on : " << boot_marker << " = " << num_failed_boots;
-
- if (lseek(file->Fd(), 0, SEEK_SET) == -1) {
- PLOG(WARNING) << "Failed to write boot marker.";
- file->Erase();
- return;
- }
-
- if (!file->WriteFully(&num_failed_boots, sizeof(num_failed_boots))) {
- PLOG(WARNING) << "Failed to write boot marker.";
- file->Erase();
- return;
- }
-
- if (file->FlushCloseOrErase() != 0) {
- PLOG(WARNING) << "Failed to flush boot marker.";
- }
-}
-
static bool GenerateImage(const std::string& image_filename, InstructionSet image_isa,
std::string* error_msg) {
const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
@@ -479,9 +379,33 @@
bool has_cache = false;
bool dalvik_cache_exists = false;
bool is_global_cache = true;
- const bool found_image = FindImageFilename(image_location, image_isa, &system_filename,
- &has_system, &cache_filename, &dalvik_cache_exists,
- &has_cache, &is_global_cache);
+ bool found_image = FindImageFilename(image_location, image_isa, &system_filename,
+ &has_system, &cache_filename, &dalvik_cache_exists,
+ &has_cache, &is_global_cache);
+
+ // If we're starting with the global cache, and we're the zygote, try to see whether there are
+ // OTA artifacts from the A/B OTA preopting to move over.
+ // (It is structurally simpler to check this here, instead of complicating the compile/relocate
+ // logic below.)
+ const bool is_zygote = Runtime::Current()->IsZygote();
+ if (is_global_cache && is_zygote) {
+ VLOG(startup) << "Checking for A/B OTA data.";
+ TryMoveOTAArtifacts(cache_filename, dalvik_cache_exists);
+
+ // Retry. There are two cases where the old info is outdated:
+ // * There wasn't a boot image before (e.g., some failure on boot), but now the OTA preopted
+ // image has been moved in-place.
+ // * There was a boot image before, and we tried to move the OTA preopted image, but a failure
+ // happened and there is no file anymore.
+ found_image = FindImageFilename(image_location,
+ image_isa,
+ &system_filename,
+ &has_system,
+ &cache_filename,
+ &dalvik_cache_exists,
+ &has_cache,
+ &is_global_cache);
+ }
if (Runtime::Current()->IsZygote() && !secondary_image) {
MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
@@ -520,8 +444,16 @@
// Whether we can write to the cache.
success = false;
} else if (secondary_image) {
- reason = "Should not have to patch secondary image.";
- success = false;
+ if (Runtime::Current()->IsZygote()) {
+ // Secondary image is out of date. Clear cache and exit to let it retry from scratch.
+ LOG(ERROR) << "Cannot patch secondary image '" << image_location
+ << "', clearing dalvik_cache and restarting zygote.";
+ PruneDalvikCache(image_isa);
+ _exit(1);
+ } else {
+ reason = "Should not have to patch secondary image.";
+ success = false;
+ }
} else {
// Try to relocate.
success = RelocateImage(image_location, cache_filename.c_str(), image_isa, &reason);
@@ -693,7 +625,7 @@
}
// Returns the delta between the dest from the source.
- off_t Delta() const {
+ uintptr_t Delta() const {
return dest_ - source_;
}
@@ -715,6 +647,13 @@
const uintptr_t length_;
};
+std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
+ return os << "(" << reinterpret_cast<const void*>(reloc.Source()) << "-"
+ << reinterpret_cast<const void*>(reloc.Source() + reloc.Length()) << ")->("
+ << reinterpret_cast<const void*>(reloc.Dest()) << "-"
+ << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
+}
+
class FixupVisitor : public ValueObject {
public:
FixupVisitor(const RelocationRange& boot_image,
@@ -746,7 +685,7 @@
ALWAYS_INLINE const void* ForwardCode(const void* src) const {
const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
if (boot_oat_.InSource(uint_src)) {
- return reinterpret_cast<const void*>(boot_oat_.ToDest(uint_src));
+ return reinterpret_cast<const void*>(boot_oat_.ToDest(uint_src));
}
if (app_oat_.InSource(uint_src)) {
return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
@@ -763,13 +702,6 @@
const RelocationRange app_oat_;
};
-std::ostream& operator<<(std::ostream& os, const RelocationRange& reloc) {
- return os << "(" << reinterpret_cast<const void*>(reloc.Source()) << "-"
- << reinterpret_cast<const void*>(reloc.Source() + reloc.Length()) << ")->("
- << reinterpret_cast<const void*>(reloc.Dest()) << "-"
- << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
-}
-
// Adapt for mirror::Class::FixupNativePointers.
class FixupObjectAdapter : public FixupVisitor {
public:
@@ -832,8 +764,10 @@
public:
template<typename... Args>
explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* pointer_array_visited,
+ const size_t pointer_size,
Args... args)
: FixupVisitor(args...),
+ pointer_size_(pointer_size),
pointer_array_visited_(pointer_array_visited) {}
// Fix up separately since we also need to fix up method entrypoints.
@@ -867,7 +801,7 @@
if (array != nullptr &&
visitor.IsInAppImage(array) &&
!pointer_array_visited_->Test(array)) {
- array->Fixup<kVerifyNone, kWithoutReadBarrier>(array, sizeof(void*), visitor);
+ array->Fixup<kVerifyNone, kWithoutReadBarrier>(array, pointer_size_, visitor);
pointer_array_visited_->Set(array);
}
}
@@ -889,7 +823,7 @@
if (obj->IsClass<kVerifyNone, kWithoutReadBarrier>()) {
mirror::Class* klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
- klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(klass, sizeof(void*), visitor);
+ klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(klass, pointer_size_, visitor);
// Deal with the pointer arrays. Use the helper function since multiple classes can reference
// the same arrays.
VisitPointerArray(klass->GetVTable<kVerifyNone, kWithoutReadBarrier>(), visitor);
@@ -908,6 +842,7 @@
}
private:
+ const size_t pointer_size_;
gc::accounting::ContinuousSpaceBitmap* const pointer_array_visited_;
};
@@ -926,7 +861,8 @@
class ForwardCodeAdapter {
public:
- ALWAYS_INLINE ForwardCodeAdapter(const FixupVisitor* visitor) : visitor_(visitor) {}
+ ALWAYS_INLINE ForwardCodeAdapter(const FixupVisitor* visitor)
+ : visitor_(visitor) {}
template <typename T>
ALWAYS_INLINE T* operator()(T* src) const {
@@ -940,19 +876,21 @@
class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor {
public:
template<typename... Args>
- explicit FixupArtMethodVisitor(bool fixup_heap_objects, Args... args)
+ explicit FixupArtMethodVisitor(bool fixup_heap_objects, size_t pointer_size, Args... args)
: FixupVisitor(args...),
- fixup_heap_objects_(fixup_heap_objects) {}
+ fixup_heap_objects_(fixup_heap_objects),
+ pointer_size_(pointer_size) {}
virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
if (fixup_heap_objects_) {
- method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this));
+ method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_);
}
- method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this));
+ method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
}
private:
const bool fixup_heap_objects_;
+ const size_t pointer_size_;
};
class FixupArtFieldVisitor : public FixupVisitor, public ArtFieldVisitor {
@@ -988,6 +926,7 @@
uint32_t boot_image_end = 0;
uint32_t boot_oat_begin = 0;
uint32_t boot_oat_end = 0;
+ const size_t pointer_size = image_header.GetPointerSize();
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
CHECK_NE(boot_image_begin, boot_image_end)
@@ -1050,6 +989,7 @@
target_base,
image_header.GetImageSize()));
FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
+ pointer_size,
boot_image,
boot_oat,
app_image,
@@ -1099,10 +1039,10 @@
dex_cache->SetResolvedMethods(new_methods);
}
for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
- ArtMethod* orig = mirror::DexCache::GetElementPtrSize(new_methods, j, sizeof(void*));
+ ArtMethod* orig = mirror::DexCache::GetElementPtrSize(new_methods, j, pointer_size);
ArtMethod* copy = fixup_adapter.ForwardObject(orig);
if (orig != copy) {
- mirror::DexCache::SetElementPtrSize(new_methods, j, copy, sizeof(void*));
+ mirror::DexCache::SetElementPtrSize(new_methods, j, copy, pointer_size);
}
}
}
@@ -1113,10 +1053,10 @@
dex_cache->SetResolvedFields(new_fields);
}
for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
- ArtField* orig = mirror::DexCache::GetElementPtrSize(new_fields, j, sizeof(void*));
+ ArtField* orig = mirror::DexCache::GetElementPtrSize(new_fields, j, pointer_size);
ArtField* copy = fixup_adapter.ForwardObject(orig);
if (orig != copy) {
- mirror::DexCache::SetElementPtrSize(new_fields, j, copy, sizeof(void*));
+ mirror::DexCache::SetElementPtrSize(new_fields, j, copy, pointer_size);
}
}
}
@@ -1125,11 +1065,16 @@
{
// Only touches objects in the app image, no need for mutator lock.
TimingLogger::ScopedTiming timing("Fixup methods", &logger);
- FixupArtMethodVisitor method_visitor(fixup_image, boot_image, boot_oat, app_image, app_oat);
+ FixupArtMethodVisitor method_visitor(fixup_image,
+ pointer_size,
+ boot_image,
+ boot_oat,
+ app_image,
+ app_oat);
image_header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
&method_visitor,
target_base,
- sizeof(void*));
+ pointer_size);
}
if (fixup_image) {
{
@@ -1283,7 +1228,7 @@
/*out*/out_error_msg));
if (map != nullptr) {
const size_t stored_size = image_header->GetDataSize();
- const size_t write_offset = sizeof(ImageHeader); // Skip the header.
+ const size_t decompress_offset = sizeof(ImageHeader); // Skip the header.
std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
PROT_READ,
MAP_PRIVATE,
@@ -1302,14 +1247,15 @@
TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
const size_t decompressed_size = LZ4_decompress_safe(
reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
- reinterpret_cast<char*>(map->Begin()) + write_offset,
+ reinterpret_cast<char*>(map->Begin()) + decompress_offset,
stored_size,
- map->Size());
+ map->Size() - decompress_offset);
VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
if (decompressed_size + sizeof(ImageHeader) != image_header->GetImageSize()) {
- *error_msg = StringPrintf("Decompressed size does not match expected image size %zu vs %zu",
- decompressed_size + sizeof(ImageHeader),
- image_header->GetImageSize());
+ *error_msg = StringPrintf(
+ "Decompressed size does not match expected image size %zu vs %zu",
+ decompressed_size + sizeof(ImageHeader),
+ image_header->GetImageSize());
return nullptr;
}
}
@@ -1456,6 +1402,7 @@
image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
+ /*low_4gb*/false,
nullptr,
error_msg);
if (oat_file == nullptr) {
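Several hunks above replace hard-coded sizeof(void*) with image_header.GetPointerSize(), presumably so the fixup code uses the pointer width recorded in the image being processed rather than that of the process doing the fixup. A small standalone illustration of why the slot width matters (toy code, not the real DexCache::GetElementPtrSize):

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  // The slot width comes from the image's pointer size, not from sizeof(void*)
  // of the code doing the reading.
  static uint64_t ReadPtrSlot(const void* array, size_t index, size_t pointer_size) {
    return pointer_size == 8u
        ? reinterpret_cast<const uint64_t*>(array)[index]
        : reinterpret_cast<const uint32_t*>(array)[index];
  }

  int main() {
    const uint32_t slots_from_32bit_image[] = {0x1000u, 0x2000u};
    // Using sizeof(void*) on a 64-bit host would read 8-byte slots here and get garbage.
    std::printf("slot 1 = 0x%llx\n",
                static_cast<unsigned long long>(
                    ReadPtrSlot(slots_from_32bit_image, 1, /*pointer_size*/4u)));
    return 0;
  }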
diff --git a/runtime/gc/space/image_space_fs.h b/runtime/gc/space/image_space_fs.h
new file mode 100644
index 0000000..ec4bf92
--- /dev/null
+++ b/runtime/gc/space/image_space_fs.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_IMAGE_SPACE_FS_H_
+#define ART_RUNTIME_GC_SPACE_IMAGE_SPACE_FS_H_
+
+#include <dirent.h>
+#include <dlfcn.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stringprintf.h"
+#include "base/unix_file/fd_file.h"
+#include "globals.h"
+#include "os.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+// This file contains helper code for ImageSpace. It has most of the file-system
+// related code, including handling A/B OTA.
+
+namespace impl {
+
+// Delete the (regular file or symbolic link) contents of the given directory, but not the
+// directory itself. If the recurse flag is true, sub-directories are emptied and removed as well.
+static void DeleteDirectoryContents(const std::string& dir, bool recurse) {
+ if (!OS::DirectoryExists(dir.c_str())) {
+ return;
+ }
+ DIR* c_dir = opendir(dir.c_str());
+ if (c_dir == nullptr) {
+ PLOG(WARNING) << "Unable to open " << dir << " to delete its contents";
+ return;
+ }
+
+ for (struct dirent* de = readdir(c_dir); de != nullptr; de = readdir(c_dir)) {
+ const char* name = de->d_name;
+ if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
+ continue;
+ }
+ // We only want to delete regular files and symbolic links.
+ std::string file = StringPrintf("%s/%s", dir.c_str(), name);
+ if (de->d_type != DT_REG && de->d_type != DT_LNK) {
+ if (de->d_type == DT_DIR) {
+ if (recurse) {
+ DeleteDirectoryContents(file, recurse);
+ // Try to rmdir the directory.
+ if (TEMP_FAILURE_RETRY(rmdir(file.c_str())) != 0) {
+ PLOG(ERROR) << "Unable to rmdir " << file;
+ }
+ }
+ } else {
+ LOG(WARNING) << "Unexpected file type of " << std::hex << de->d_type << " encountered.";
+ }
+ } else {
+ // Try to unlink the file.
+ if (TEMP_FAILURE_RETRY(unlink(file.c_str())) != 0) {
+ PLOG(ERROR) << "Unable to unlink " << file;
+ }
+ }
+ }
+ CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(c_dir))) << "Unable to close directory.";
+}
+
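+// Returns true if the directory exists and has at least one entry other than "." and "..".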
+static bool HasContent(const char* dir) {
+ if (!OS::DirectoryExists(dir)) {
+ return false;
+ }
+ DIR* c_dir = opendir(dir);
+ if (c_dir == nullptr) {
+ PLOG(WARNING) << "Unable to open " << dir << " to check for contents";
+ return false;
+ }
+
+ for (struct dirent* de = readdir(c_dir); de != nullptr; de = readdir(c_dir)) {
+ const char* name = de->d_name;
+ if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
+ continue;
+ }
+ // Something here.
+ CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(c_dir))) << "Unable to close directory.";
+ return true;
+ }
+ CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(c_dir))) << "Unable to close directory.";
+ return false;
+}
+
+// Delete this directory, if empty. Then repeat with the parents. Skips non-existing directories.
+// If stop_at isn't null, the recursion will stop when a directory with the given name is found.
+static void DeleteEmptyDirectoriesUpTo(const std::string& dir, const char* stop_at) {
+ if (HasContent(dir.c_str())) {
+ return;
+ }
+ if (stop_at != nullptr) {
+ // This check isn't precise, but good enough in practice.
+ if (EndsWith(dir, stop_at)) {
+ return;
+ }
+ }
+ if (OS::DirectoryExists(dir.c_str())) {
+ if (TEMP_FAILURE_RETRY(rmdir(dir.c_str())) != 0) {
+ PLOG(ERROR) << "Unable to rmdir " << dir;
+ return;
+ }
+ }
+ size_t last_slash = dir.rfind('/');
+ if (last_slash != std::string::npos) {
+ DeleteEmptyDirectoriesUpTo(dir.substr(0, last_slash), stop_at);
+ }
+}
+
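+// Move the preopted OTA artifacts into place by renaming the source directory onto the target
+// dalvik-cache directory. Bails out if the target already has content, as merging caches is not
+// supported.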
+static void MoveOTAArtifacts(const char* src, const char* trg) {
+ DCHECK(OS::DirectoryExists(src));
+ DCHECK(OS::DirectoryExists(trg));
+
+ if (HasContent(trg)) {
+ LOG(WARNING) << "We do not support merging caches, but the target isn't empty: " << src
+ << " to " << trg;
+ return;
+ }
+
+ if (TEMP_FAILURE_RETRY(rename(src, trg)) != 0) {
+ PLOG(ERROR) << "Could not rename OTA cache " << src << " to target " << trg;
+ }
+}
+
+// This is some dlopen/dlsym and hardcoded data to avoid a dependency on libselinux. Make sure
+// this stays in sync!
+static bool RelabelOTAFiles(const std::string& dalvik_cache_dir) {
+ // We only expect selinux on devices. Don't even attempt this on the host.
+ if (!kIsTargetBuild) {
+ return true;
+ }
+
+ // Custom deleter, so we can use std::unique_ptr.
+ struct HandleDeleter {
+ void operator()(void* in) {
+ if (in != nullptr && dlclose(in) != 0) {
+ PLOG(ERROR) << "Could not close selinux handle.";
+ }
+ }
+ };
+
+ // Look for selinux library.
+ std::unique_ptr<void, HandleDeleter> selinux_handle(dlopen("libselinux.so", RTLD_NOW));
+ if (selinux_handle == nullptr) {
+ // Assume everything's OK if we can't open the library.
+ return true;
+ }
+ dlerror(); // Clean dlerror string.
+
+ void* restorecon_ptr = dlsym(selinux_handle.get(), "selinux_android_restorecon");
+ if (restorecon_ptr == nullptr) {
+ // Can't find the relabel function. That's bad. Make sure the zygote fails, as we have no
+ // other recourse to make this error obvious.
+ const char* error_string = dlerror();
+ LOG(FATAL) << "Could not find selinux restorecon function: "
+ << ((error_string != nullptr) ? error_string : "(unknown error)");
+ UNREACHABLE();
+ }
+
+ using RestoreconFn = int (*)(const char*, unsigned int);
+ constexpr unsigned int kRecursive = 4U;
+
+ RestoreconFn restorecon_fn = reinterpret_cast<RestoreconFn>(restorecon_ptr);
+ if (restorecon_fn(dalvik_cache_dir.c_str(), kRecursive) != 0) {
+ LOG(ERROR) << "Failed to restorecon " << dalvik_cache_dir;
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace impl
+
+
+// We are relocating or generating the core image. We should get rid of everything. It is all
+// out-of-date. We also don't really care if this fails since it is just a convenience.
+// Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
+// Note this should only be used during first boot.
+static void PruneDalvikCache(InstructionSet isa) {
+ CHECK_NE(isa, kNone);
+ // Prune the base /data/dalvik-cache.
+ impl::DeleteDirectoryContents(GetDalvikCacheOrDie(".", false), false);
+ // Prune /data/dalvik-cache/<isa>.
+ impl::DeleteDirectoryContents(GetDalvikCacheOrDie(GetInstructionSetString(isa), false), false);
+}
+
+// We write out an empty file to the zygote's ISA specific cache dir at the start of
+// every zygote boot and delete it when the boot completes. If we find a file already
+// present, it usually means the boot didn't complete. We wipe the entire dalvik
+// cache if that's the case.
+static void MarkZygoteStart(const InstructionSet isa, const uint32_t max_failed_boots) {
+ const std::string isa_subdir = GetDalvikCacheOrDie(GetInstructionSetString(isa), false);
+ const std::string boot_marker = isa_subdir + "/.booting";
+ const char* file_name = boot_marker.c_str();
+
+ uint32_t num_failed_boots = 0;
+ std::unique_ptr<File> file(OS::OpenFileReadWrite(file_name));
+ if (file.get() == nullptr) {
+ file.reset(OS::CreateEmptyFile(file_name));
+
+ if (file.get() == nullptr) {
+ PLOG(WARNING) << "Failed to create boot marker.";
+ return;
+ }
+ } else {
+ if (!file->ReadFully(&num_failed_boots, sizeof(num_failed_boots))) {
+ PLOG(WARNING) << "Failed to read boot marker.";
+ file->Erase();
+ return;
+ }
+ }
+
+ if (max_failed_boots != 0 && num_failed_boots > max_failed_boots) {
+ LOG(WARNING) << "Incomplete boot detected. Pruning dalvik cache";
+ impl::DeleteDirectoryContents(isa_subdir, false);
+ }
+
+ ++num_failed_boots;
+ VLOG(startup) << "Number of failed boots on " << boot_marker << " = " << num_failed_boots;
+
+ if (lseek(file->Fd(), 0, SEEK_SET) == -1) {
+ PLOG(WARNING) << "Failed to write boot marker.";
+ file->Erase();
+ return;
+ }
+
+ if (!file->WriteFully(&num_failed_boots, sizeof(num_failed_boots))) {
+ PLOG(WARNING) << "Failed to write boot marker.";
+ file->Erase();
+ return;
+ }
+
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(WARNING) << "Failed to flush boot marker.";
+ }
+}
+
+static void TryMoveOTAArtifacts(const std::string& cache_filename, bool dalvik_cache_exists) {
+ // We assume here that the global cache means /data/dalvik-cache, and that we can inject an
+ // 'ota' subdirectory into that path. The CHECKs below verify both assumptions.
+ CHECK(StartsWith(cache_filename, "/data/dalvik-cache")) << cache_filename;
+
+ // Inject ota subdirectory.
+ std::string ota_filename(cache_filename);
+ ota_filename = ota_filename.insert(strlen("/data/"), "ota/");
+ CHECK(StartsWith(ota_filename, "/data/ota/dalvik-cache")) << ota_filename;
+
+ // See if the file exists.
+ if (OS::FileExists(ota_filename.c_str())) {
+ VLOG(startup) << "OTA directory does exist, checking for artifacts";
+
+ size_t last_slash = ota_filename.rfind('/');
+ CHECK_NE(last_slash, std::string::npos);
+ std::string ota_source_dir = ota_filename.substr(0, last_slash);
+
+ // We need the dalvik cache now, really.
+ if (dalvik_cache_exists) {
+ size_t last_cache_slash = cache_filename.rfind('/');
+ DCHECK_NE(last_cache_slash, std::string::npos);
+ std::string dalvik_cache_target_dir = cache_filename.substr(0, last_cache_slash);
+
+ // First clean the target cache.
+ impl::DeleteDirectoryContents(dalvik_cache_target_dir.c_str(), false);
+
+ // Now move things over.
+ impl::MoveOTAArtifacts(ota_source_dir.c_str(), dalvik_cache_target_dir.c_str());
+
+ // Last step: ensure the files have the right selinux label.
+ if (!impl::RelabelOTAFiles(dalvik_cache_target_dir)) {
+ // This isn't good. We potentially moved files, but they have the wrong label. Delete the
+ // files.
+ LOG(WARNING) << "Could not relabel files, must delete dalvik-cache.";
+ impl::DeleteDirectoryContents(dalvik_cache_target_dir.c_str(), false);
+ }
+ }
+
+ // Cleanup.
+ impl::DeleteDirectoryContents(ota_source_dir.c_str(), true);
+ impl::DeleteEmptyDirectoriesUpTo(ota_source_dir, "ota");
+ } else {
+ VLOG(startup) << "No OTA directory.";
+ }
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_IMAGE_SPACE_FS_H_
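A note on how the cleanup helpers above compose: after the artifacts have been moved (or cleaned out), DeleteEmptyDirectoriesUpTo() walks up the path, removing each now-empty directory until it reaches one whose name ends in the stop_at suffix; directories that no longer exist are simply skipped. An illustrative call with a hypothetical path:

  // Removes "arm64", then "dalvik-cache" (each only if empty), and stops without
  // touching "/data/ota" because it matches the "ota" suffix.
  impl::DeleteEmptyDirectoriesUpTo("/data/ota/dalvik-cache/arm64", "ota");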
diff --git a/runtime/interpreter/mterp/arm/binopLit8.S b/runtime/interpreter/mterp/arm/binopLit8.S
index ec0b3c4..b8f0d92 100644
--- a/runtime/interpreter/mterp/arm/binopLit8.S
+++ b/runtime/interpreter/mterp/arm/binopLit8.S
@@ -13,7 +13,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S
index 1d511ec..4d88001 100644
--- a/runtime/interpreter/mterp/arm/binopWide.S
+++ b/runtime/interpreter/mterp/arm/binopWide.S
@@ -19,9 +19,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if $chkzero
diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S
index 81db48b..bb16335 100644
--- a/runtime/interpreter/mterp/arm/binopWide2addr.S
+++ b/runtime/interpreter/mterp/arm/binopWide2addr.S
@@ -16,8 +16,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if $chkzero
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
index 4c5ffc5..981c036 100644
--- a/runtime/interpreter/mterp/arm/entry.S
+++ b/runtime/interpreter/mterp/arm/entry.S
@@ -47,8 +47,8 @@
/* set up "named" registers */
mov rSELF, r0
ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to insns[] (i.e. - the dalivk byte code).
- add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame
+ add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
+ VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
diff --git a/runtime/interpreter/mterp/arm/fbinop2addr.S b/runtime/interpreter/mterp/arm/fbinop2addr.S
index b052a29..53c87a0 100644
--- a/runtime/interpreter/mterp/arm/fbinop2addr.S
+++ b/runtime/interpreter/mterp/arm/fbinop2addr.S
@@ -7,14 +7,12 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- flds s1, [r3] @ s1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ flds s1, [r3] @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
flds s0, [r9] @ s0<- vA
-
$instr @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fsts s2, [r9] @ vAA<- s2
diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
index 4e7401d..9766e2c 100644
--- a/runtime/interpreter/mterp/arm/fbinopWide2addr.S
+++ b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
@@ -8,11 +8,10 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- fldd d1, [r3] @ d1<- vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
+ fldd d1, [r3] @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
diff --git a/runtime/interpreter/mterp/arm/funop.S b/runtime/interpreter/mterp/arm/funop.S
index d7a0859..1b8bb8b 100644
--- a/runtime/interpreter/mterp/arm/funop.S
+++ b/runtime/interpreter/mterp/arm/funop.S
@@ -6,11 +6,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
$instr @ s1<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
diff --git a/runtime/interpreter/mterp/arm/funopNarrower.S b/runtime/interpreter/mterp/arm/funopNarrower.S
index 9daec28..b9f758b 100644
--- a/runtime/interpreter/mterp/arm/funopNarrower.S
+++ b/runtime/interpreter/mterp/arm/funopNarrower.S
@@ -6,11 +6,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
fldd d0, [r3] @ d0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
$instr @ s0<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S
index 450ba3a..854cdc9 100644
--- a/runtime/interpreter/mterp/arm/funopWider.S
+++ b/runtime/interpreter/mterp/arm/funopWider.S
@@ -6,11 +6,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
$instr @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
index e1430b4..853a7a4 100644
--- a/runtime/interpreter/mterp/arm/op_aget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_aget_wide.S
@@ -19,7 +19,7 @@
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_wide.S b/runtime/interpreter/mterp/arm/op_aput_wide.S
index 49839d1..0057507 100644
--- a/runtime/interpreter/mterp/arm/op_aput_wide.S
+++ b/runtime/interpreter/mterp/arm/op_aput_wide.S
@@ -15,7 +15,7 @@
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
diff --git a/runtime/interpreter/mterp/arm/op_cmp_long.S b/runtime/interpreter/mterp/arm/op_cmp_long.S
index 2b4c0ea..e57b19c 100644
--- a/runtime/interpreter/mterp/arm/op_cmp_long.S
+++ b/runtime/interpreter/mterp/arm/op_cmp_long.S
@@ -23,8 +23,8 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
cmp r1, r3 @ compare (vBB+1, vCC+1)
diff --git a/runtime/interpreter/mterp/arm/op_const.S b/runtime/interpreter/mterp/arm/op_const.S
index de3e3c3..39890a0 100644
--- a/runtime/interpreter/mterp/arm/op_const.S
+++ b/runtime/interpreter/mterp/arm/op_const.S
@@ -1,7 +1,7 @@
/* const vAA, #+BBBBbbbb */
mov r3, rINST, lsr #8 @ r3<- AA
- FETCH r0, 1 @ r0<- bbbb (low
- FETCH r1, 2 @ r1<- BBBB (high
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (high)
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_const_16.S b/runtime/interpreter/mterp/arm/op_const_16.S
index 59c6dac..a30cf3a 100644
--- a/runtime/interpreter/mterp/arm/op_const_16.S
+++ b/runtime/interpreter/mterp/arm/op_const_16.S
@@ -1,5 +1,5 @@
/* const/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
mov r3, rINST, lsr #8 @ r3<- AA
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
SET_VREG r0, r3 @ vAA<- r0
diff --git a/runtime/interpreter/mterp/arm/op_const_4.S b/runtime/interpreter/mterp/arm/op_const_4.S
index c177bb9..c97b0e9 100644
--- a/runtime/interpreter/mterp/arm/op_const_4.S
+++ b/runtime/interpreter/mterp/arm/op_const_4.S
@@ -1,8 +1,7 @@
/* const/4 vA, #+B */
- mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
ubfx r0, rINST, #8, #4 @ r0<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
GET_INST_OPCODE ip @ ip<- opcode from rINST
SET_VREG r1, r0 @ fp[A]<- r1
GOTO_OPCODE ip @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_high16.S b/runtime/interpreter/mterp/arm/op_const_high16.S
index 460d546..536276d 100644
--- a/runtime/interpreter/mterp/arm/op_const_high16.S
+++ b/runtime/interpreter/mterp/arm/op_const_high16.S
@@ -1,5 +1,5 @@
/* const/high16 vAA, #+BBBB0000 */
- FETCH r0, 1 @ r0<- 0000BBBB (zero-extended
+ FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
mov r3, rINST, lsr #8 @ r3<- AA
mov r0, r0, lsl #16 @ r0<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
index 1a3d0b2..1255c07 100644
--- a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
+++ b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
@@ -1,7 +1,7 @@
/* const/string vAA, String@BBBBBBBB */
EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (low
- FETCH r2, 2 @ r2<- BBBB (high
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r2, 2 @ r2<- BBBB (high)
mov r1, rINST, lsr #8 @ r1<- AA
orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
add r2, rFP, #OFF_FP_SHADOWFRAME
diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S
index 12394b6..8310a4c 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide.S
@@ -8,7 +8,7 @@
orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S
index 3811d86..28abb51 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_16.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_16.S
@@ -1,10 +1,10 @@
/* const-wide/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
mov r3, rINST, lsr #8 @ r3<- AA
mov r1, r0, asr #31 @ r1<- ssssssss
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S
index 0b6f1cc..c10bb04 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_32.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_32.S
@@ -5,7 +5,7 @@
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
mov r1, r0, asr #31 @ r1<- ssssssss
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
index b9796eb..d7e38ec 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_high16.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
@@ -5,7 +5,7 @@
mov r1, r1, lsl #16 @ r1<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
- add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
index 859ffac..e287d51 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide.S
@@ -16,7 +16,7 @@
cmp r3, #0
bne MterpException @ bail out
CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
- add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
stmia r3, {r0-r1} @ fp[A]<- r0/r1
ADVANCE 2
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
index 07f854a..5a7177d 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
@@ -7,7 +7,7 @@
beq common_errNullObject @ object was null
ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ fp[A]<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_instance_of.S b/runtime/interpreter/mterp/arm/op_instance_of.S
index d76f0b0..019929e 100644
--- a/runtime/interpreter/mterp/arm/op_instance_of.S
+++ b/runtime/interpreter/mterp/arm/op_instance_of.S
@@ -11,10 +11,9 @@
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
mov r3, rSELF @ r3<- self
- mov r9, rINST, lsr #8 @ r9<- A+
- and r9, r9, #15 @ r9<- A
bl MterpInstanceOf @ (index, &obj, method, self)
ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r9, rINST, #8, #4 @ r9<- A
PREFETCH_INST 2
cmp r1, #0 @ exception pending?
bne MterpException
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide.S b/runtime/interpreter/mterp/arm/op_iput_wide.S
index 8bbd63e..3dda187 100644
--- a/runtime/interpreter/mterp/arm/op_iput_wide.S
+++ b/runtime/interpreter/mterp/arm/op_iput_wide.S
@@ -5,7 +5,7 @@
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ubfx r2, rINST, #8, #4 @ r2<- A
- add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
bl artSet64InstanceFromMterp
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
index a2fc9e1..88e6ea1 100644
--- a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
@@ -5,7 +5,7 @@
ubfx r0, rINST, #8, #4 @ r0<- A
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
- add r0, rFP, r0, lsl #2 @ r0<- &fp[A]
+ VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
strd r0, [r2, r3] @ obj.field<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_long_to_double.S b/runtime/interpreter/mterp/arm/op_long_to_double.S
index 1d48a2a..cac12d4 100644
--- a/runtime/interpreter/mterp/arm/op_long_to_double.S
+++ b/runtime/interpreter/mterp/arm/op_long_to_double.S
@@ -8,8 +8,8 @@
*/
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
vldr d0, [r3] @ d0<- vAA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S
index 1845ccf..87929ea 100644
--- a/runtime/interpreter/mterp/arm/op_move_result_wide.S
+++ b/runtime/interpreter/mterp/arm/op_move_result_wide.S
@@ -1,7 +1,7 @@
/* move-result-wide vAA */
mov rINST, rINST, lsr #8 @ rINST<- AA
ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- retval.j
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S
index f5d156d..ff353ea 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide.S
@@ -2,8 +2,8 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r2, rFP, rINST, lsl #2 @ r2<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- fp[B]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S
index 8a55c4b..9812b66 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide_16.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide_16.S
@@ -2,8 +2,8 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
FETCH r3, 2 @ r3<- BBBB
FETCH r2, 1 @ r2<- AAAA
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add lr, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
+ VREG_INDEX_TO_ADDR lr, r2 @ lr<- &fp[AAAA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
index b65259d..d2cc60c 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide_from16.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
@@ -2,8 +2,8 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
FETCH r3, 1 @ r3<- BBBB
mov rINST, rINST, lsr #8 @ rINST<- AA
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_mul_long.S b/runtime/interpreter/mterp/arm/op_mul_long.S
index 9e83778..8f40f19 100644
--- a/runtime/interpreter/mterp/arm/op_mul_long.S
+++ b/runtime/interpreter/mterp/arm/op_mul_long.S
@@ -20,8 +20,8 @@
FETCH r0, 1 @ r0<- CCBB
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
mul ip, r2, r1 @ ip<- ZxW
@@ -29,7 +29,7 @@
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
mov r0, rINST, lsr #8 @ r0<- AA
add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
- add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
diff --git a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
index 789dbd3..7ef24c5 100644
--- a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
+++ b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
@@ -9,8 +9,8 @@
/* mul-long/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx r9, rINST, #8, #4 @ r9<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
mul ip, r2, r1 @ ip<- ZxW
diff --git a/runtime/interpreter/mterp/arm/op_return_wide.S b/runtime/interpreter/mterp/arm/op_return_wide.S
index cfab530..ceae878 100644
--- a/runtime/interpreter/mterp/arm/op_return_wide.S
+++ b/runtime/interpreter/mterp/arm/op_return_wide.S
@@ -9,6 +9,6 @@
ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
index 3a50908..4f2f89d 100644
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -12,7 +12,7 @@
bl artGet64StaticFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
- add lr, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR lr, r9 @ lr<- &fp[AA]
cmp r3, #0 @ Fail to resolve?
bne MterpException @ bail out
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/op_shl_long.S b/runtime/interpreter/mterp/arm/op_shl_long.S
index 12ea248..82ec6ed 100644
--- a/runtime/interpreter/mterp/arm/op_shl_long.S
+++ b/runtime/interpreter/mterp/arm/op_shl_long.S
@@ -9,12 +9,12 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r2<- r2 & 0x3f
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r1, r1, asl r2 @ r1<- r1 << r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
diff --git a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
index 4799e77..f361a7d 100644
--- a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
+++ b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
@@ -7,7 +7,7 @@
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r1, r1, asl r2 @ r1<- r1 << r2
diff --git a/runtime/interpreter/mterp/arm/op_shr_long.S b/runtime/interpreter/mterp/arm/op_shr_long.S
index 88a13d6..a0afe5b 100644
--- a/runtime/interpreter/mterp/arm/op_shr_long.S
+++ b/runtime/interpreter/mterp/arm/op_shr_long.S
@@ -9,12 +9,12 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
diff --git a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
index 78d8bb7..976110e 100644
--- a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
+++ b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
@@ -7,7 +7,7 @@
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
index adbcffa..8d8ed8c 100644
--- a/runtime/interpreter/mterp/arm/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sput_wide.S
@@ -8,7 +8,7 @@
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rINST, lsr #8 @ r3<- AA
- add r2, rFP, r2, lsl #2
+ VREG_INDEX_TO_ADDR r2, r2
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
bl artSet64IndirectStaticFromMterp
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long.S b/runtime/interpreter/mterp/arm/op_ushr_long.S
index f98ec63..c817bc9 100644
--- a/runtime/interpreter/mterp/arm/op_ushr_long.S
+++ b/runtime/interpreter/mterp/arm/op_ushr_long.S
@@ -9,12 +9,12 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
index 840283d..2735f87 100644
--- a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
+++ b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
@@ -7,7 +7,7 @@
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
diff --git a/runtime/interpreter/mterp/arm/unopNarrower.S b/runtime/interpreter/mterp/arm/unopNarrower.S
index a5fc027..2d0453a 100644
--- a/runtime/interpreter/mterp/arm/unopNarrower.S
+++ b/runtime/interpreter/mterp/arm/unopNarrower.S
@@ -12,7 +12,7 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S
index a074234..cd5defd 100644
--- a/runtime/interpreter/mterp/arm/unopWide.S
+++ b/runtime/interpreter/mterp/arm/unopWide.S
@@ -9,8 +9,8 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S
index 23b6b9d..9d50489 100644
--- a/runtime/interpreter/mterp/arm/unopWider.S
+++ b/runtime/interpreter/mterp/arm/unopWider.S
@@ -10,7 +10,7 @@
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
$preinstr @ optional op; may set condition codes
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S
index f9073ab..23e656e 100644
--- a/runtime/interpreter/mterp/arm64/entry.S
+++ b/runtime/interpreter/mterp/arm64/entry.S
@@ -46,7 +46,7 @@
/* set up "named" registers */
mov xSELF, x0
ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to insns[] (i.e. - the dalivk byte code).
+ add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[]
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
index 7223750..7101ba9 100644
--- a/runtime/interpreter/mterp/arm64/header.S
+++ b/runtime/interpreter/mterp/arm64/header.S
@@ -272,7 +272,7 @@
* Convert a virtual register index into an address.
*/
.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, xFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+ add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */
.endm
/*
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide.S b/runtime/interpreter/mterp/arm64/op_iput_wide.S
index 4ce9525..e1ab127 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_wide.S
@@ -5,7 +5,7 @@
lsr w1, wINST, #12 // w1<- B
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- add x2, xFP, x2, lsl #2 // w2<- &fp[A]
+ VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
bl artSet64InstanceFromMterp
diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S
index 1d034ec..a79b1a6 100644
--- a/runtime/interpreter/mterp/arm64/op_sput_wide.S
+++ b/runtime/interpreter/mterp/arm64/op_sput_wide.S
@@ -8,7 +8,7 @@
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
lsr w2, wINST, #8 // w3<- AA
- add x2, xFP, w2, lsl #2
+ VREG_INDEX_TO_ADDR x2, w2
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
bl artSet64IndirectStaticFromMterp
diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S
index cef08fe..5771a4f 100644
--- a/runtime/interpreter/mterp/mips/entry.S
+++ b/runtime/interpreter/mterp/mips/entry.S
@@ -49,7 +49,7 @@
/* set up "named" registers */
move rSELF, a0
lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to insns[] (i.e. - the dalivk byte code).
+ addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[]
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index b26a63a..092474d 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -343,8 +343,8 @@
/* set up "named" registers */
mov rSELF, r0
ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to insns[] (i.e. - the dalivk byte code).
- add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame
+ add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
+ VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
@@ -435,8 +435,8 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r2, rFP, rINST, lsl #2 @ r2<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- fp[B]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -452,8 +452,8 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
FETCH r3, 1 @ r3<- BBBB
mov rINST, rINST, lsr #8 @ rINST<- AA
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@@ -469,8 +469,8 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
FETCH r3, 2 @ r3<- BBBB
FETCH r2, 1 @ r2<- AAAA
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add lr, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
+ VREG_INDEX_TO_ADDR lr, r2 @ lr<- &fp[AAAA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
@@ -563,7 +563,7 @@
/* move-result-wide vAA */
mov rINST, rINST, lsr #8 @ rINST<- AA
ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- retval.j
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -655,7 +655,7 @@
ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
b MterpReturn
@@ -687,10 +687,9 @@
.L_op_const_4: /* 0x12 */
/* File: arm/op_const_4.S */
/* const/4 vA, #+B */
- mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
ubfx r0, rINST, #8, #4 @ r0<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
GET_INST_OPCODE ip @ ip<- opcode from rINST
SET_VREG r1, r0 @ fp[A]<- r1
GOTO_OPCODE ip @ execute next instruction
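
The op_const_4 rewrite above folds the shift-left-16 / arithmetic-shift-right-28 pair into a single sbfx (signed bit-field extract). Both forms yield the sign-extended 4-bit literal B held in bits 12..15 of the instruction word; a small C++ model of the extraction, with an illustrative function name:

    #include <cstdint>

    // What sbfx r1, rINST, #12, #4 computes: the nibble at bit 12,
    // sign-extended to 32 bits.
    int32_t ConstFourLiteral(uint32_t inst) {
      int32_t field = (inst >> 12) & 0xF;  // raw nibble, 0..15
      return (field ^ 0x8) - 0x8;          // sign-extend: 8..15 become -8..-1
    }
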
@@ -700,7 +699,7 @@
.L_op_const_16: /* 0x13 */
/* File: arm/op_const_16.S */
/* const/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
mov r3, rINST, lsr #8 @ r3<- AA
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
SET_VREG r0, r3 @ vAA<- r0
@@ -713,8 +712,8 @@
/* File: arm/op_const.S */
/* const vAA, #+BBBBbbbb */
mov r3, rINST, lsr #8 @ r3<- AA
- FETCH r0, 1 @ r0<- bbbb (low
- FETCH r1, 2 @ r1<- BBBB (high
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (high)
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -726,7 +725,7 @@
.L_op_const_high16: /* 0x15 */
/* File: arm/op_const_high16.S */
/* const/high16 vAA, #+BBBB0000 */
- FETCH r0, 1 @ r0<- 0000BBBB (zero-extended
+ FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
mov r3, rINST, lsr #8 @ r3<- AA
mov r0, r0, lsl #16 @ r0<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@@ -739,12 +738,12 @@
.L_op_const_wide_16: /* 0x16 */
/* File: arm/op_const_wide_16.S */
/* const-wide/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
mov r3, rINST, lsr #8 @ r3<- AA
mov r1, r0, asr #31 @ r1<- ssssssss
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
@@ -760,7 +759,7 @@
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
mov r1, r0, asr #31 @ r1<- ssssssss
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
@@ -780,7 +779,7 @@
orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
@@ -796,7 +795,7 @@
mov r1, r1, lsl #16 @ r1<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
- add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
@@ -825,8 +824,8 @@
/* File: arm/op_const_string_jumbo.S */
/* const/string vAA, String@BBBBBBBB */
EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (low
- FETCH r2, 2 @ r2<- BBBB (high
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r2, 2 @ r2<- BBBB (high)
mov r1, rINST, lsr #8 @ r1<- AA
orr r0, r0, r2, lsl #16 @ r1<- BBBBbbbb
add r2, rFP, #OFF_FP_SHADOWFRAME
@@ -938,10 +937,9 @@
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
mov r3, rSELF @ r3<- self
- mov r9, rINST, lsr #8 @ r9<- A+
- and r9, r9, #15 @ r9<- A
bl MterpInstanceOf @ (index, &obj, method, self)
ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r9, rINST, #8, #4 @ r9<- A
PREFETCH_INST 2
cmp r1, #0 @ exception pending?
bne MterpException
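
Several hunks in this generated file, starting with the instance-of handler above and repeated in the float and double conversion handlers below, replace the two-instruction A extraction (logical shift right by 8, then and with 15) with a single ubfx (unsigned bit-field extract), presumably moved so the register holding A is only written once the value is actually needed. The extraction itself is simply:

    #include <cstdint>

    // What ubfx r9, rINST, #8, #4 computes: the unsigned 4-bit register
    // field A at bit 8. The function name is illustrative.
    uint32_t RegisterFieldA(uint32_t inst) {
      return (inst >> 8) & 0xF;
    }
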
@@ -1509,8 +1507,8 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
cmp r1, r3 @ compare (vBB+1, vCC+1)
@@ -2089,7 +2087,7 @@
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
GOTO_OPCODE ip @ jump to next instruction
@@ -2314,7 +2312,7 @@
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
@@ -2533,7 +2531,7 @@
cmp r3, #0
bne MterpException @ bail out
CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
- add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
stmia r3, {r0-r1} @ fp[A]<- r0/r1
ADVANCE 2
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -2736,7 +2734,7 @@
mov r1, rINST, lsr #12 @ r1<- B
GET_VREG r1, r1 @ r1<- fp[B], the object pointer
ubfx r2, rINST, #8, #4 @ r2<- A
- add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
PREFETCH_INST 2
bl artSet64InstanceFromMterp
@@ -2923,7 +2921,7 @@
bl artGet64StaticFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
- add lr, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR lr, r9 @ lr<- &fp[AA]
cmp r3, #0 @ Fail to resolve?
bne MterpException @ bail out
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@@ -3135,7 +3133,7 @@
FETCH r0, 1 @ r0<- field ref BBBB
ldr r1, [rFP, #OFF_FP_METHOD]
mov r2, rINST, lsr #8 @ r3<- AA
- add r2, rFP, r2, lsl #2
+ VREG_INDEX_TO_ADDR r2, r2
mov r3, rSELF
PREFETCH_INST 2 @ Get next inst, but don't advance rPC
bl artSet64IndirectStaticFromMterp
@@ -3668,8 +3666,8 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -3696,8 +3694,8 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -3750,8 +3748,8 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -3779,7 +3777,7 @@
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
@ optional op; may set condition codes
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -3803,11 +3801,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
fsitos s1, s0 @ s1<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
@@ -3828,11 +3825,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
fsitod d0, s0 @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -3880,7 +3876,7 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@ optional op; may set condition codes
@@ -3904,8 +3900,8 @@
*/
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
vldr d0, [r3] @ d0<- vAA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -3935,11 +3931,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
ftosizs s1, s0 @ s1<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
@@ -3964,7 +3959,7 @@
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
@ optional op; may set condition codes
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -3989,11 +3984,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
vcvt.f64.f32 d0, s0 @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -4015,11 +4009,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
fldd d0, [r3] @ d0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
ftosizd s0, d0 @ s0<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
@@ -4043,8 +4036,8 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@@ -4070,11 +4063,10 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
fldd d0, [r3] @ d0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r9, r9, #15 @ r9<- A
vcvt.f32.f64 s0, d0 @ s0<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
@@ -4626,9 +4618,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 0
@@ -4670,9 +4662,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 0
@@ -4715,8 +4707,8 @@
FETCH r0, 1 @ r0<- CCBB
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
mul ip, r2, r1 @ ip<- ZxW
@@ -4724,7 +4716,7 @@
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
mov r0, rINST, lsr #8 @ r0<- AA
add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
- add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
@@ -4755,9 +4747,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 1
@@ -4800,9 +4792,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 1
@@ -4844,9 +4836,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 0
@@ -4888,9 +4880,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 0
@@ -4932,9 +4924,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 0
@@ -4966,12 +4958,12 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r2<- r2 & 0x3f
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r1, r1, asl r2 @ r1<- r1 << r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
@@ -4998,12 +4990,12 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
@@ -5030,12 +5022,12 @@
mov r9, rINST, lsr #8 @ r9<- AA
and r3, r0, #255 @ r3<- BB
mov r0, r0, lsr #8 @ r0<- CC
- add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
@@ -5355,9 +5347,9 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
- add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
.if 0
@@ -5808,8 +5800,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
@@ -5848,8 +5840,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
@@ -5881,8 +5873,8 @@
/* mul-long/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx r9, rINST, #8, #4 @ r9<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
mul ip, r2, r1 @ ip<- ZxW
@@ -5917,8 +5909,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 1
@@ -5958,8 +5950,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 1
@@ -5998,8 +5990,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
@@ -6038,8 +6030,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
@@ -6078,8 +6070,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
@@ -6109,7 +6101,7 @@
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r1, r1, asl r2 @ r1<- r1 << r2
@@ -6136,7 +6128,7 @@
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
@@ -6163,7 +6155,7 @@
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r2, r3 @ r2<- vB
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
@@ -6191,14 +6183,12 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- flds s1, [r3] @ s1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ flds s1, [r3] @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
flds s0, [r9] @ s0<- vA
-
fadds s2, s0, s1 @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fsts s2, [r9] @ vAA<- s2
@@ -6219,14 +6209,12 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- flds s1, [r3] @ s1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ flds s1, [r3] @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
flds s0, [r9] @ s0<- vA
-
fsubs s2, s0, s1 @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fsts s2, [r9] @ vAA<- s2
@@ -6247,14 +6235,12 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- flds s1, [r3] @ s1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ flds s1, [r3] @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
flds s0, [r9] @ s0<- vA
-
fmuls s2, s0, s1 @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fsts s2, [r9] @ vAA<- s2
@@ -6275,14 +6261,12 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- flds s1, [r3] @ s1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ flds s1, [r3] @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
flds s0, [r9] @ s0<- vA
-
fdivs s2, s0, s1 @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fsts s2, [r9] @ vAA<- s2
@@ -6343,11 +6327,10 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- fldd d1, [r3] @ d1<- vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
+ fldd d1, [r3] @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
@@ -6372,11 +6355,10 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- fldd d1, [r3] @ d1<- vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
+ fldd d1, [r3] @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
@@ -6401,11 +6383,10 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- fldd d1, [r3] @ d1<- vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
+ fldd d1, [r3] @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
@@ -6430,11 +6411,10 @@
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- mov r9, rINST, lsr #8 @ r9<- A+
+ ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- and r9, r9, #15 @ r9<- A
- fldd d1, [r3] @ d1<- vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
+ fldd d1, [r3] @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
@@ -6467,8 +6447,8 @@
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
- add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
@@ -6783,7 +6763,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -6821,7 +6801,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -6860,7 +6840,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -6967,7 +6947,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -7005,7 +6985,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -7043,7 +7023,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -7081,7 +7061,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -7119,7 +7099,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -7157,7 +7137,7 @@
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
@@ -7207,7 +7187,7 @@
beq common_errNullObject @ object was null
ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ fp[A]<- r0/r1
@@ -7263,7 +7243,7 @@
ubfx r0, rINST, #8, #4 @ r0<- A
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
- add r0, rFP, r0, lsl #2 @ r0<- &fp[A]
+ VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
strd r0, [r2, r3] @ obj.field<- r0/r1
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index c7c0fb5..cdb27e8 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -279,7 +279,7 @@
* Convert a virtual register index into an address.
*/
.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, xFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+ add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */
.endm
/*
@@ -338,7 +338,7 @@
/* set up "named" registers */
mov xSELF, x0
ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to insns[] (i.e. - the dalivk byte code).
+ add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[]
@@ -2552,7 +2552,7 @@
lsr w1, wINST, #12 // w1<- B
GET_VREG w1, w1 // w1<- fp[B], the object pointer
ubfx w2, wINST, #8, #4 // w2<- A
- add x2, xFP, x2, lsl #2 // w2<- &fp[A]
+ VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A]
ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
PREFETCH_INST 2
bl artSet64InstanceFromMterp
@@ -2941,7 +2941,7 @@
FETCH w0, 1 // w0<- field ref BBBB
ldr x1, [xFP, #OFF_FP_METHOD]
lsr w2, wINST, #8 // w3<- AA
- add x2, xFP, w2, lsl #2
+ VREG_INDEX_TO_ADDR x2, w2
mov x3, xSELF
PREFETCH_INST 2 // Get next inst, but don't advance rPC
bl artSet64IndirectStaticFromMterp
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 7ae1ab1..b134129 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -542,7 +542,7 @@
/* set up "named" registers */
move rSELF, a0
lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to insns[] (i.e. - the dalivk byte code).
+ addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[]
@@ -4373,8 +4373,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4412,8 +4412,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4451,8 +4451,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4491,8 +4491,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4526,8 +4526,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4567,8 +4567,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4602,8 +4602,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4642,8 +4642,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4681,8 +4681,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4720,8 +4720,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4759,8 +4759,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4798,8 +4798,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
@@ -4837,8 +4837,8 @@
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int
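
The comment rewrite repeated above, from "the ARM math lib handles it correctly" to "the CPU handles it correctly", keeps the same contract: when chkzero is set the handler tests vCC for zero, but it never special-cases INT_MIN / -1 because the hardware already produces the wrapped result Dalvik requires. A portable C++ rendering has to spell out both cases explicitly; this is a sketch of the contract, not the mterp code:

    #include <cstdint>
    #include <limits>

    // Dalvik div-int semantics: division by zero throws (modelled here as a
    // false return), and INT_MIN / -1 wraps to INT_MIN instead of trapping.
    bool DivInt(int32_t vbb, int32_t vcc, int32_t* out) {
      if (vcc == 0) {
        return false;  // the interpreter raises ArithmeticException here
      }
      if (vbb == std::numeric_limits<int32_t>::min() && vcc == -1) {
        *out = vbb;  // plain C++ division would overflow on this input
        return true;
      }
      *out = vbb / vcc;
      return true;
    }
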
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
deleted file mode 100644
index df794e1..0000000
--- a/runtime/native/java_lang_Runtime.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "java_lang_Runtime.h"
-
-#include <dlfcn.h>
-#include <limits.h>
-#include <unistd.h>
-
-#include "base/macros.h"
-#include "gc/heap.h"
-#include "handle_scope-inl.h"
-#include "jni_internal.h"
-#include "mirror/class_loader.h"
-#include "runtime.h"
-#include "scoped_thread_state_change.h"
-#include "ScopedUtfChars.h"
-#include "verify_object-inl.h"
-
-#include <sstream>
-#ifdef __ANDROID__
-// This function is provided by android linker.
-extern "C" void android_update_LD_LIBRARY_PATH(const char* ld_library_path);
-#endif // __ANDROID__
-
-namespace art {
-
-static void Runtime_gc(JNIEnv*, jclass) {
- if (Runtime::Current()->IsExplicitGcDisabled()) {
- LOG(INFO) << "Explicit GC skipped.";
- return;
- }
- Runtime::Current()->GetHeap()->CollectGarbage(false);
-}
-
-NO_RETURN static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
- LOG(INFO) << "System.exit called, status: " << status;
- Runtime::Current()->CallExitHook(status);
- exit(status);
-}
-
-static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPath) {
-#ifdef __ANDROID__
- if (javaLdLibraryPath != nullptr) {
- ScopedUtfChars ldLibraryPath(env, javaLdLibraryPath);
- if (ldLibraryPath.c_str() != nullptr) {
- android_update_LD_LIBRARY_PATH(ldLibraryPath.c_str());
- }
- }
-
-#else
- LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
- UNUSED(javaLdLibraryPath, env);
-#endif
-}
-
-static jstring Runtime_nativeLoad(JNIEnv* env,
- jclass,
- jstring javaFilename,
- jobject javaLoader,
- jstring javaLibrarySearchPath) {
- ScopedUtfChars filename(env, javaFilename);
- if (filename.c_str() == nullptr) {
- return nullptr;
- }
-
- int32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
-
- // Starting with N nativeLoad uses classloader local
- // linker namespace instead of global LD_LIBRARY_PATH
- // (23 is Marshmallow). This call is here to preserve
- // backwards compatibility for the apps targeting sdk
- // version <= 23
- if (target_sdk_version == 0) {
- SetLdLibraryPath(env, javaLibrarySearchPath);
- }
-
- std::string error_msg;
- {
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- bool success = vm->LoadNativeLibrary(env,
- filename.c_str(),
- javaLoader,
- javaLibrarySearchPath,
- &error_msg);
- if (success) {
- return nullptr;
- }
- }
-
- // Don't let a pending exception from JNI_OnLoad cause a CheckJNI issue with NewStringUTF.
- env->ExceptionClear();
- return env->NewStringUTF(error_msg.c_str());
-}
-
-static jlong Runtime_maxMemory(JNIEnv*, jclass) {
- return Runtime::Current()->GetHeap()->GetMaxMemory();
-}
-
-static jlong Runtime_totalMemory(JNIEnv*, jclass) {
- return Runtime::Current()->GetHeap()->GetTotalMemory();
-}
-
-static jlong Runtime_freeMemory(JNIEnv*, jclass) {
- return Runtime::Current()->GetHeap()->GetFreeMemory();
-}
-
-static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Runtime, freeMemory, "!()J"),
- NATIVE_METHOD(Runtime, gc, "()V"),
- NATIVE_METHOD(Runtime, maxMemory, "!()J"),
- NATIVE_METHOD(Runtime, nativeExit, "(I)V"),
- NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;"),
- NATIVE_METHOD(Runtime, totalMemory, "!()J"),
-};
-
-void register_java_lang_Runtime(JNIEnv* env) {
- REGISTER_NATIVE_METHODS("java/lang/Runtime");
-}
-
-} // namespace art
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 7155c79..033ea56 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -89,6 +89,7 @@
uint8_t* oat_file_begin,
bool writable,
bool executable,
+ bool low_4gb,
const char* abs_dex_location,
std::string* error_msg);
@@ -102,6 +103,7 @@
uint8_t* oat_file_begin,
bool writable,
bool executable,
+ bool low_4gb,
std::string* error_msg) = 0;
bool ComputeFields(uint8_t* requested_base,
@@ -133,6 +135,7 @@
uint8_t* oat_file_begin,
bool writable,
bool executable,
+ bool low_4gb,
const char* abs_dex_location,
std::string* error_msg) {
std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(location, executable));
@@ -140,6 +143,7 @@
oat_file_begin,
writable,
executable,
+ low_4gb,
error_msg)) {
return nullptr;
}
@@ -147,7 +151,6 @@
if (!ret->ComputeFields(requested_base, elf_filename, error_msg)) {
return nullptr;
}
-
ret->PreSetup(elf_filename);
if (!ret->Setup(abs_dex_location, error_msg)) {
@@ -532,6 +535,7 @@
uint8_t* oat_file_begin,
bool writable,
bool executable,
+ bool low_4gb,
std::string* error_msg) OVERRIDE;
// Ask the linker where it mmaped the file and notify our mmap wrapper of the regions.
@@ -558,6 +562,7 @@
uint8_t* oat_file_begin,
bool writable,
bool executable,
+ bool low_4gb,
std::string* error_msg) {
// Use dlopen only when flagged to do so, and when it's OK to load things executable.
// TODO: Also try when not executable? The issue here could be re-mapping as writable (as
@@ -567,6 +572,10 @@
*error_msg = "DlOpen is disabled.";
return false;
}
+ if (low_4gb) {
+ *error_msg = "DlOpen does not support low 4gb loading.";
+ return false;
+ }
if (writable) {
*error_msg = "DlOpen does not support writable loading.";
return false;
@@ -702,6 +711,7 @@
uint8_t* oat_file_begin, // Override base if not null
bool writable,
bool executable,
+ bool low_4gb,
const char* abs_dex_location,
std::string* error_msg);
@@ -723,6 +733,7 @@
uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
+ bool low_4gb,
std::string* error_msg) OVERRIDE;
void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) OVERRIDE {
@@ -733,6 +744,7 @@
uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
+ bool low_4gb,
std::string* error_msg);
private:
@@ -748,11 +760,17 @@
uint8_t* oat_file_begin, // Override base if not null
bool writable,
bool executable,
+ bool low_4gb,
const char* abs_dex_location,
std::string* error_msg) {
ScopedTrace trace("Open elf file " + location);
std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, executable));
- bool success = oat_file->ElfFileOpen(file, oat_file_begin, writable, executable, error_msg);
+ bool success = oat_file->ElfFileOpen(file,
+ oat_file_begin,
+ writable,
+ executable,
+ low_4gb,
+ error_msg);
if (!success) {
CHECK(!error_msg->empty());
return nullptr;
@@ -792,6 +810,7 @@
uint8_t* oat_file_begin, // Override where the file is loaded to if not null
bool writable,
bool executable,
+ bool low_4gb,
std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
@@ -803,6 +822,7 @@
oat_file_begin,
writable,
executable,
+ low_4gb,
error_msg);
}
@@ -810,19 +830,21 @@
uint8_t* oat_file_begin,
bool writable,
bool executable,
+ bool low_4gb,
std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
// TODO: rename requested_base to oat_data_begin
elf_file_.reset(ElfFile::Open(file,
writable,
/*program_header_only*/true,
+ low_4gb,
error_msg,
oat_file_begin));
if (elf_file_ == nullptr) {
DCHECK(!error_msg->empty());
return false;
}
- bool loaded = elf_file_->Load(executable, error_msg);
+ bool loaded = elf_file_->Load(executable, low_4gb, error_msg);
DCHECK(loaded || !error_msg->empty());
return loaded;
}
@@ -870,6 +892,7 @@
uint8_t* requested_base,
uint8_t* oat_file_begin,
bool executable,
+ bool low_4gb,
const char* abs_dex_location,
std::string* error_msg) {
ScopedTrace trace("Open oat file " + location);
@@ -885,15 +908,15 @@
oat_file_begin,
false,
executable,
+ low_4gb,
abs_dex_location,
error_msg);
if (with_dlopen != nullptr) {
return with_dlopen;
}
if (kPrintDlOpenErrorMessage) {
- LOG(ERROR) << "Failed to dlopen: " << *error_msg;
+ LOG(ERROR) << "Failed to dlopen: " << filename << " with error " << *error_msg;
}
-
// If we aren't trying to execute, we just use our own ElfFile loader for a couple reasons:
//
// On target, dlopen may fail when compiling due to selinux restrictions on installd.
@@ -913,6 +936,7 @@
oat_file_begin,
false,
executable,
+ low_4gb,
abs_dex_location,
error_msg);
return with_internal;
@@ -929,6 +953,7 @@
nullptr,
true,
false,
+ /*low_4gb*/false,
abs_dex_location,
error_msg);
}
@@ -944,6 +969,7 @@
nullptr,
false,
false,
+ /*low_4gb*/false,
abs_dex_location,
error_msg);
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 1084253..7af77ae 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -64,6 +64,7 @@
uint8_t* requested_base,
uint8_t* oat_file_begin,
bool executable,
+ bool low_4gb,
const char* abs_dex_location,
std::string* error_msg);
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 90712c6..cbc0ec6 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -815,8 +815,13 @@
const std::string& odex_file_name = *OdexFileName();
std::string error_msg;
cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
- odex_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_.c_str(), &error_msg));
+ odex_file_name.c_str(),
+ nullptr,
+ nullptr,
+ load_executable_,
+ /*low_4gb*/false,
+ dex_location_.c_str(),
+ &error_msg));
if (cached_odex_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
<< odex_file_name << ": " << error_msg;
@@ -846,8 +851,13 @@
const std::string& oat_file_name = *OatFileName();
std::string error_msg;
cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
- oat_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_.c_str(), &error_msg));
+ oat_file_name.c_str(),
+ nullptr,
+ nullptr,
+ load_executable_,
+ /*low_4gb*/false,
+ dex_location_.c_str(),
+ &error_msg));
if (cached_oat_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing oat file "
<< oat_file_name << ": " << error_msg;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 4541468..046d8ae 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -218,9 +218,14 @@
// Verify the odex file was generated as expected and really is
// unrelocated.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(
- odex_location.c_str(), odex_location.c_str(), nullptr, nullptr,
- false, dex_location.c_str(), &error_msg));
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
const std::vector<gc::space::ImageSpace*> image_spaces =
@@ -252,9 +257,14 @@
setenv("ANDROID_DATA", android_data_.c_str(), 1);
// Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(
- odex_location.c_str(), odex_location.c_str(), nullptr, nullptr,
- false, dex_location.c_str(), &error_msg));
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_TRUE(odex_file->IsPic());
}
@@ -269,9 +279,14 @@
ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
// Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(
- odex_location.c_str(), odex_location.c_str(), nullptr, nullptr,
- false, dex_location.c_str(), &error_msg));
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_TRUE(odex_file->IsExtractOnly());
EXPECT_EQ(odex_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0u);
@@ -290,9 +305,14 @@
ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
// Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(
- odex_location.c_str(), odex_location.c_str(), nullptr, nullptr,
- false, dex_location.c_str(), &error_msg));
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
printf("error %s", error_msg.c_str());
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_TRUE(odex_file->IsProfileGuideCompiled());
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index d377457..aff9b61 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -342,15 +342,15 @@
// Starting with N nativeLoad uses classloader local
// linker namespace instead of global LD_LIBRARY_PATH
- // (23 is Marshmallow)
- if (target_sdk_version <= 23) {
+ // (23 is Marshmallow). This call is here to preserve
+ // backwards compatibility for apps targeting SDK
+ // version <= 23.
+ if (target_sdk_version == 0) {
SetLdLibraryPath(env, javaLibrarySearchPath);
}
std::string error_msg;
{
- art::ScopedObjectAccess soa(env);
- art::StackHandleScope<1> hs(soa.Self());
art::JavaVMExt* vm = art::Runtime::Current()->GetJavaVM();
bool success = vm->LoadNativeLibrary(env,
filename.c_str(),
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e95f2c5..7726a75 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -810,7 +810,11 @@
return false;
}
std::string error_msg;
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(), false, false, &error_msg));
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(),
+ false,
+ false,
+ /*low_4gb*/false,
+ &error_msg));
if (elf_file.get() == nullptr) {
return false;
}
@@ -1336,7 +1340,6 @@
register_java_lang_reflect_Method(env);
register_java_lang_reflect_Proxy(env);
register_java_lang_ref_Reference(env);
- register_java_lang_Runtime(env);
register_java_lang_String(env);
register_java_lang_StringFactory(env);
register_java_lang_System(env);
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index dd4ffe4..53c2e0b 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -414,6 +414,23 @@
return arg >> 0;
}
+ /// CHECK-START: long Main.Shr64(long) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Const64:i\d+>> IntConstant 64
+ /// CHECK-DAG: <<Shr:j\d+>> Shr [<<Arg>>,<<Const64>>]
+ /// CHECK-DAG: Return [<<Shr>>]
+
+ /// CHECK-START: long Main.Shr64(long) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+
+ /// CHECK-START: long Main.Shr64(long) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+
+ public static long Shr64(long arg) {
+ return arg >> 64;
+ }
+
/// CHECK-START: long Main.Sub0(long) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<Const0:j\d+>> LongConstant 0
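
The new Shr64 test leans on Java's shift semantics: for a long shift the distance is masked to its low six bits (JLS 15.19), so arg >> 64 is the same as arg >> 0, which is why the simplifier may replace it with arg outright. A minimal, illustrative sketch of that identity (the class and method names are made up for the example, not part of the patch):

    // Illustrative only: shows why (arg >> 64) == arg for any long in Java.
    public class ShiftMaskDemo {
      static long shr64(long arg) {
        // For long shifts only the low six bits of the distance are used,
        // so 64 & 0x3f == 0 and this is effectively arg >> 0.
        return arg >> 64;
      }

      public static void main(String[] args) {
        long value = 0x1234567812345678L;
        System.out.println(Long.toHexString(value));          // 1234567812345678
        System.out.println(Long.toHexString(shr64(value)));   // same output
      }
    }
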
@@ -1601,6 +1618,24 @@
return (short) (value & 0x17fff);
}
+ /// CHECK-START: double Main.shortAnd0xffffToShortToDouble(short) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:i\d+>> IntConstant 65535
+ /// CHECK-DAG: <<And:i\d+>> And [<<Mask>>,<<Arg>>]
+ /// CHECK-DAG: <<Same:s\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Same>>]
+ /// CHECK-DAG: Return [<<Double>>]
+
+ /// CHECK-START: double Main.shortAnd0xffffToShortToDouble(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: Return [<<Double>>]
+
+ public static double shortAnd0xffffToShortToDouble(short value) {
+ short same = (short) (value & 0xffff);
+ return (double) same;
+ }
+
/// CHECK-START: int Main.intReverseCondition(int) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
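
The shortAnd0xffffToShortToDouble simplification holds because masking a short with 0xffff keeps exactly its sixteen value bits, and the cast back to short sign-extends from bit 15 again, so (short)(value & 0xffff) reproduces the original short; the And and the narrowing conversion are therefore redundant before the widening conversion to double. A small illustrative check of that identity (the class name is invented for the example):

    // Illustrative only: (short)(value & 0xffff) == value for every short,
    // so converting either one to double gives the same result.
    public class ShortMaskDemo {
      public static void main(String[] args) {
        for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; i++) {
          short value = (short) i;
          short same = (short) (value & 0xffff);  // mask, then sign-extend back
          if (same != value || (double) same != (double) value) {
            throw new AssertionError("mismatch at " + value);
          }
        }
        System.out.println("identity holds for all short values");
      }
    }
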
@@ -1653,6 +1688,7 @@
assertLongEquals(OrSame(arg), arg);
assertIntEquals(Shl0(arg), arg);
assertLongEquals(Shr0(arg), arg);
+ assertLongEquals(Shr64(arg), arg);
assertLongEquals(Sub0(arg), arg);
assertIntEquals(SubAliasNeg(arg), -arg);
assertLongEquals(UShr0(arg), arg);
@@ -1717,56 +1753,63 @@
assertIntEquals(doubleConditionEqualZero(6.0), 54);
assertIntEquals(doubleConditionEqualZero(43.0), 13);
- assertIntEquals(intToDoubleToInt(1234567), 1234567);
- assertIntEquals(intToDoubleToInt(Integer.MIN_VALUE), Integer.MIN_VALUE);
- assertIntEquals(intToDoubleToInt(Integer.MAX_VALUE), Integer.MAX_VALUE);
- assertStringEquals(intToDoubleToIntPrint(7654321), "d=7654321.0, i=7654321");
- assertIntEquals(byteToDoubleToInt((byte) 12), 12);
- assertIntEquals(byteToDoubleToInt(Byte.MIN_VALUE), Byte.MIN_VALUE);
- assertIntEquals(byteToDoubleToInt(Byte.MAX_VALUE), Byte.MAX_VALUE);
- assertIntEquals(floatToDoubleToInt(11.3f), 11);
- assertStringEquals(floatToDoubleToIntPrint(12.25f), "d=12.25, i=12");
- assertIntEquals(byteToDoubleToShort((byte) 123), 123);
- assertIntEquals(byteToDoubleToShort(Byte.MIN_VALUE), Byte.MIN_VALUE);
- assertIntEquals(byteToDoubleToShort(Byte.MAX_VALUE), Byte.MAX_VALUE);
- assertIntEquals(charToDoubleToShort((char) 1234), 1234);
- assertIntEquals(charToDoubleToShort(Character.MIN_VALUE), Character.MIN_VALUE);
- assertIntEquals(charToDoubleToShort(Character.MAX_VALUE), /* sign-extended */ -1);
- assertIntEquals(floatToIntToShort(12345.75f), 12345);
- assertIntEquals(floatToIntToShort((float)(Short.MIN_VALUE - 1)), Short.MAX_VALUE);
- assertIntEquals(floatToIntToShort((float)(Short.MAX_VALUE + 1)), Short.MIN_VALUE);
- assertIntEquals(intToFloatToInt(-54321), -54321);
- assertDoubleEquals(longToIntToDouble(0x1234567812345678L), (double) 0x12345678);
- assertDoubleEquals(longToIntToDouble(Long.MIN_VALUE), 0.0);
- assertDoubleEquals(longToIntToDouble(Long.MAX_VALUE), -1.0);
- assertLongEquals(longToIntToLong(0x1234567812345678L), 0x0000000012345678L);
- assertLongEquals(longToIntToLong(0x1234567887654321L), 0xffffffff87654321L);
- assertLongEquals(longToIntToLong(Long.MIN_VALUE), 0L);
- assertLongEquals(longToIntToLong(Long.MAX_VALUE), -1L);
- assertIntEquals(shortToCharToShort((short) -5678), (short) -5678);
- assertIntEquals(shortToCharToShort(Short.MIN_VALUE), Short.MIN_VALUE);
- assertIntEquals(shortToCharToShort(Short.MAX_VALUE), Short.MAX_VALUE);
- assertIntEquals(shortToLongToInt((short) 5678), 5678);
- assertIntEquals(shortToLongToInt(Short.MIN_VALUE), Short.MIN_VALUE);
- assertIntEquals(shortToLongToInt(Short.MAX_VALUE), Short.MAX_VALUE);
- assertIntEquals(shortToCharToByte((short) 0x1234), 0x34);
- assertIntEquals(shortToCharToByte((short) 0x12f0), -0x10);
- assertIntEquals(shortToCharToByte(Short.MIN_VALUE), 0);
- assertIntEquals(shortToCharToByte(Short.MAX_VALUE), -1);
- assertStringEquals(shortToCharToBytePrint((short) 1025), "c=1025, b=1");
- assertStringEquals(shortToCharToBytePrint((short) 1023), "c=1023, b=-1");
- assertStringEquals(shortToCharToBytePrint((short) -1), "c=65535, b=-1");
+ assertIntEquals(1234567, intToDoubleToInt(1234567));
+ assertIntEquals(Integer.MIN_VALUE, intToDoubleToInt(Integer.MIN_VALUE));
+ assertIntEquals(Integer.MAX_VALUE, intToDoubleToInt(Integer.MAX_VALUE));
+ assertStringEquals("d=7654321.0, i=7654321", intToDoubleToIntPrint(7654321));
+ assertIntEquals(12, byteToDoubleToInt((byte) 12));
+ assertIntEquals(Byte.MIN_VALUE, byteToDoubleToInt(Byte.MIN_VALUE));
+ assertIntEquals(Byte.MAX_VALUE, byteToDoubleToInt(Byte.MAX_VALUE));
+ assertIntEquals(11, floatToDoubleToInt(11.3f));
+ assertStringEquals("d=12.25, i=12", floatToDoubleToIntPrint(12.25f));
+ assertIntEquals(123, byteToDoubleToShort((byte) 123));
+ assertIntEquals(Byte.MIN_VALUE, byteToDoubleToShort(Byte.MIN_VALUE));
+ assertIntEquals(Byte.MAX_VALUE, byteToDoubleToShort(Byte.MAX_VALUE));
+ assertIntEquals(1234, charToDoubleToShort((char) 1234));
+ assertIntEquals(Character.MIN_VALUE, charToDoubleToShort(Character.MIN_VALUE));
+ assertIntEquals(/* sign-extended */ -1, charToDoubleToShort(Character.MAX_VALUE));
+ assertIntEquals(12345, floatToIntToShort(12345.75f));
+ assertIntEquals(Short.MAX_VALUE, floatToIntToShort((float)(Short.MIN_VALUE - 1)));
+ assertIntEquals(Short.MIN_VALUE, floatToIntToShort((float)(Short.MAX_VALUE + 1)));
+ assertIntEquals(-54321, intToFloatToInt(-54321));
+ assertDoubleEquals((double) 0x12345678, longToIntToDouble(0x1234567812345678L));
+ assertDoubleEquals(0.0, longToIntToDouble(Long.MIN_VALUE));
+ assertDoubleEquals(-1.0, longToIntToDouble(Long.MAX_VALUE));
+ assertLongEquals(0x0000000012345678L, longToIntToLong(0x1234567812345678L));
+ assertLongEquals(0xffffffff87654321L, longToIntToLong(0x1234567887654321L));
+ assertLongEquals(0L, longToIntToLong(Long.MIN_VALUE));
+ assertLongEquals(-1L, longToIntToLong(Long.MAX_VALUE));
+ assertIntEquals((short) -5678, shortToCharToShort((short) -5678));
+ assertIntEquals(Short.MIN_VALUE, shortToCharToShort(Short.MIN_VALUE));
+ assertIntEquals(Short.MAX_VALUE, shortToCharToShort(Short.MAX_VALUE));
+ assertIntEquals(5678, shortToLongToInt((short) 5678));
+ assertIntEquals(Short.MIN_VALUE, shortToLongToInt(Short.MIN_VALUE));
+ assertIntEquals(Short.MAX_VALUE, shortToLongToInt(Short.MAX_VALUE));
+ assertIntEquals(0x34, shortToCharToByte((short) 0x1234));
+ assertIntEquals(-0x10, shortToCharToByte((short) 0x12f0));
+ assertIntEquals(0, shortToCharToByte(Short.MIN_VALUE));
+ assertIntEquals(-1, shortToCharToByte(Short.MAX_VALUE));
+ assertStringEquals("c=1025, b=1", shortToCharToBytePrint((short) 1025));
+ assertStringEquals("c=1023, b=-1", shortToCharToBytePrint((short) 1023));
+ assertStringEquals("c=65535, b=-1", shortToCharToBytePrint((short) -1));
- assertIntEquals(longAnd0xffToByte(0x1234432112344321L), 0x21);
- assertIntEquals(longAnd0xffToByte(Long.MIN_VALUE), 0);
- assertIntEquals(longAnd0xffToByte(Long.MAX_VALUE), -1);
- assertIntEquals(intAnd0x1ffffToChar(0x43211234), 0x1234);
- assertIntEquals(intAnd0x1ffffToChar(Integer.MIN_VALUE), 0);
- assertIntEquals(intAnd0x1ffffToChar(Integer.MAX_VALUE), Character.MAX_VALUE);
- assertIntEquals(intAnd0x17fffToShort(0x87654321), 0x4321);
- assertIntEquals(intAnd0x17fffToShort(0x88888888), 0x0888);
- assertIntEquals(intAnd0x17fffToShort(Integer.MIN_VALUE), 0);
- assertIntEquals(intAnd0x17fffToShort(Integer.MAX_VALUE), Short.MAX_VALUE);
+ assertIntEquals(0x21, longAnd0xffToByte(0x1234432112344321L));
+ assertIntEquals(0, longAnd0xffToByte(Long.MIN_VALUE));
+ assertIntEquals(-1, longAnd0xffToByte(Long.MAX_VALUE));
+ assertIntEquals(0x1234, intAnd0x1ffffToChar(0x43211234));
+ assertIntEquals(0, intAnd0x1ffffToChar(Integer.MIN_VALUE));
+ assertIntEquals(Character.MAX_VALUE, intAnd0x1ffffToChar(Integer.MAX_VALUE));
+ assertIntEquals(0x4321, intAnd0x17fffToShort(0x87654321));
+ assertIntEquals(0x0888, intAnd0x17fffToShort(0x88888888));
+ assertIntEquals(0, intAnd0x17fffToShort(Integer.MIN_VALUE));
+ assertIntEquals(Short.MAX_VALUE, intAnd0x17fffToShort(Integer.MAX_VALUE));
+
+ assertDoubleEquals(0.0, shortAnd0xffffToShortToDouble((short) 0));
+ assertDoubleEquals(1.0, shortAnd0xffffToShortToDouble((short) 1));
+ assertDoubleEquals(-2.0, shortAnd0xffffToShortToDouble((short) -2));
+ assertDoubleEquals(12345.0, shortAnd0xffffToShortToDouble((short) 12345));
+ assertDoubleEquals((double)Short.MAX_VALUE, shortAnd0xffffToShortToDouble(Short.MAX_VALUE));
+ assertDoubleEquals((double)Short.MIN_VALUE, shortAnd0xffffToShortToDouble(Short.MIN_VALUE));
assertIntEquals(intReverseCondition(41), 13);
assertIntEquals(intReverseConditionNaN(-5), 13);
diff --git a/test/582-checker-bce-length/expected.txt b/test/582-checker-bce-length/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/582-checker-bce-length/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/582-checker-bce-length/info.txt b/test/582-checker-bce-length/info.txt
new file mode 100644
index 0000000..cb826cd
--- /dev/null
+++ b/test/582-checker-bce-length/info.txt
@@ -0,0 +1 @@
+Regression test for deoptimization-based bounds check elimination.
diff --git a/test/582-checker-bce-length/src/Main.java b/test/582-checker-bce-length/src/Main.java
new file mode 100644
index 0000000..3565b6b
--- /dev/null
+++ b/test/582-checker-bce-length/src/Main.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Regression test for duplicate removal of the same bounds check.
+ */
+public class Main {
+
+ /// CHECK-START: void Main.doit1(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.doit1(int[]) BCE (after)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.doit1(int[]) BCE (after)
+ /// CHECK-NOT: Deoptimize
+ public static void doit1(int[] a) {
+ a[a.length-3] = 1;
+ a[a.length-2] = 2;
+ a[a.length-1] = 3;
+ // This introduces a problematic BoundsCheck(x,x) node:
+ // (1) it is certainly out of bounds, so it should be rejected;
+ // (2) it exposed a bug in removing the same bounds check twice if (1) were not done.
+ a[a.length-0] = 4;
+ }
+
+ /// CHECK-START: void Main.doit2(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ //
+ /// CHECK-START: void Main.doit2(int[]) BCE (after)
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: Deoptimize
+ //
+ /// CHECK-START: void Main.doit2(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ public static void doit2(int[] a) {
+ a[a.length-4] = -101;
+ a[a.length-3] = -102;
+ a[a.length-2] = -103;
+ a[a.length-1] = -104;
+ }
+
+ public static void main(String[] args) {
+ int[] a = new int[4];
+
+ int fail = 0;
+ try {
+ doit1(a);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ fail++;
+ }
+ expectEquals(1, fail);
+ expectEquals(0, a[0]);
+ expectEquals(1, a[1]);
+ expectEquals(2, a[2]);
+ expectEquals(3, a[3]);
+
+ try {
+ doit2(a);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ fail++;
+ }
+ expectEquals(1, fail);
+ expectEquals(-101, a[0]);
+ expectEquals(-102, a[1]);
+ expectEquals(-103, a[2]);
+ expectEquals(-104, a[3]);
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
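
To see why the two methods are checked differently: with an array of length 4, doit1 writes indices 1, 2, 3 and then a.length-0 == 4, which is always out of bounds, so BCE has to keep every BoundsCheck and must not add a Deoptimize for it; doit2 only touches the last four elements, so its checks can be replaced by deoptimization guards (the checker expects two Deoptimize instructions and no remaining BoundsCheck). The sketch below is only a hedged, hand-written approximation of the transformed shape of doit2, condensed to a single explicit length guard; deoptimize() stands in for the runtime's slow path and is not a real API:

    // Hedged sketch, plain Java: roughly the shape doit2 takes after BCE with
    // deoptimization. deoptimize() is a placeholder, not an ART API; the real
    // runtime would transfer to the interpreter instead of throwing.
    public class BceDeoptSketch {
      static void deoptimize() {
        throw new ArrayIndexOutOfBoundsException();
      }

      public static void doit2Sketch(int[] a) {
        if (a.length < 4) deoptimize();  // one guard subsumes the four bounds checks
        a[a.length - 4] = -101;          // in bounds once the guard has passed
        a[a.length - 3] = -102;
        a[a.length - 2] = -103;
        a[a.length - 1] = -104;
      }

      public static void main(String[] args) {
        int[] a = new int[4];
        doit2Sketch(a);
        System.out.println(a[0] + " " + a[1] + " " + a[2] + " " + a[3]);
      }
    }
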
diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt
index 95f0c2d..75d1eff 100644
--- a/tools/libcore_failures_concurrent_collector.txt
+++ b/tools/libcore_failures_concurrent_collector.txt
@@ -16,5 +16,11 @@
names: ["jsr166.LinkedTransferQueueTest#testTransfer2",
"jsr166.LinkedTransferQueueTest#testWaitingConsumer"],
bug: 25883050
+},
+{
+ description: "libcore.java.lang.OldSystemTest#test_gc failure on armv8-concurrent-collector.",
+ result: EXEC_FAILED,
+ names: ["libcore.java.lang.OldSystemTest#test_gc"],
+ bug: 26155567
}
]