Merge "Rosalloc fast path in assembly for MIPS32"
diff --git a/Android.mk b/Android.mk
index 34022ae..4f73127 100644
--- a/Android.mk
+++ b/Android.mk
@@ -400,6 +400,35 @@
$(TEST_ART_ADB_ROOT_AND_REMOUNT)
adb sync
+####################################################################################################
+# Fake packages to ensure generation of libopenjdkd when one builds with mm/mmm/mmma.
+#
+# The library is required for starting a runtime in debug mode, but libartd does not depend on it
+# (dependency cycle otherwise).
+#
+# Note: * As the package is phony and exists only to create a dependency, the package name
+#         is irrelevant.
+#       * We set MULTILIB explicitly to "both" to make clear that we want both libraries on
+#         64-bit systems, even though that is the default.
+
+# ART on the host.
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := art-libartd-libopenjdkd-host-dependency
+LOCAL_MULTILIB := both
+LOCAL_REQUIRED_MODULES := libopenjdkd
+LOCAL_IS_HOST_MODULE := true
+include $(BUILD_PHONY_PACKAGE)
+endif
+
+# ART on the target.
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+include $(CLEAR_VARS)
+LOCAL_MODULE := art-libartd-libopenjdkd-target-dependency
+LOCAL_MULTILIB := both
+LOCAL_REQUIRED_MODULES := libopenjdkd
+include $(BUILD_PHONY_PACKAGE)
+endif
+
########################################################################
# "m build-art" for quick minimal build
.PHONY: build-art
diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk
index 953cfc0..a06f45a 100644
--- a/build/Android.cpplint.mk
+++ b/build/Android.cpplint.mk
@@ -16,10 +16,14 @@
include art/build/Android.common_build.mk
-ART_CPPLINT := art/tools/cpplint.py
+ART_CPPLINT := $(LOCAL_PATH)/tools/cpplint.py
ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf
ART_CPPLINT_FLAGS := --quiet
-ART_CPPLINT_SRC := $(shell find art -name "*.h" -o -name "*$(ART_CPP_EXTENSION)" | grep -v art/compiler/llvm/generated/ | grep -v art/runtime/elf\.h)
+# This:
+# 1) Gets a list of all .h & .cc files in the art directory.
+# 2) Prepends 'art/' to each of them to make the full name.
+# 3) Removes art/runtime/elf.h from the list.
+ART_CPPLINT_SRC := $(filter-out $(LOCAL_PATH)/runtime/elf.h, $(addprefix $(LOCAL_PATH)/, $(call all-subdir-named-files,*.h) $(call all-subdir-named-files,*$(ART_CPP_EXTENSION))))
# "mm cpplint-art" to verify we aren't regressing
.PHONY: cpplint-art
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 3d16c49..af64470 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -441,7 +441,7 @@
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
$$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
- $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so \
+ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
$$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
$$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
@@ -485,7 +485,7 @@
# Dependencies for all host gtests.
gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \
$$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \
- $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$$(ART_HOST_SHLIB_EXTENSION) \
+ $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
$$(gtest_exe) \
$$(ART_GTEST_$(1)_HOST_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 209bb5a..385f34a 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -211,11 +211,9 @@
generate_debug_info_ = false;
} else if (option == "--debuggable") {
debuggable_ = true;
- generate_debug_info_ = true;
} else if (option == "--native-debuggable") {
native_debuggable_ = true;
debuggable_ = true;
- generate_debug_info_ = true;
} else if (option.starts_with("--top-k-profile-threshold=")) {
ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
} else if (option == "--include-patch-information") {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index f8032bb..f14bdc4 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -50,7 +50,7 @@
static const size_t kDefaultNumDexMethodsThreshold = 900;
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultNativeDebuggable = false;
- static const bool kDefaultGenerateDebugInfo = kIsDebugBuild;
+ static const bool kDefaultGenerateDebugInfo = false;
static const bool kDefaultIncludePatchInformation = false;
static const size_t kDefaultInlineDepthLimit = 3;
static const size_t kDefaultInlineMaxCodeUnits = 32;
diff --git a/compiler/dwarf/method_debug_info.h b/compiler/dwarf/method_debug_info.h
index a391e4d..e8ba914 100644
--- a/compiler/dwarf/method_debug_info.h
+++ b/compiler/dwarf/method_debug_info.h
@@ -30,8 +30,8 @@
uint32_t access_flags_;
const DexFile::CodeItem* code_item_;
bool deduped_;
- uint32_t low_pc_;
- uint32_t high_pc_;
+ uintptr_t low_pc_;
+ uintptr_t high_pc_;
CompiledMethod* compiled_method_;
};
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index bb07cc2..a7461a5 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -148,6 +148,12 @@
}
}
+ // Returns true if the section was written to disk.
+ // (Used to check whether we have .text when writing JIT debug info)
+ bool Exists() const {
+ return finished_;
+ }
+
// Get the location of this section in virtual memory.
Elf_Addr GetAddress() const {
CHECK(started_);
@@ -247,16 +253,18 @@
}
// Buffer symbol for this section. It will be written later.
+ // If the symbol's section is null, it will be considered absolute (SHN_ABS).
+ // (We use this in JIT to reference code that is stored outside the debug ELF file.)
void Add(Elf_Word name, const Section* section,
Elf_Addr addr, bool is_relative, Elf_Word size,
uint8_t binding, uint8_t type, uint8_t other = 0) {
- CHECK(section != nullptr);
Elf_Sym sym = Elf_Sym();
sym.st_name = name;
sym.st_value = addr + (is_relative ? section->GetAddress() : 0);
sym.st_size = size;
sym.st_other = other;
- sym.st_shndx = section->GetSectionIndex();
+ sym.st_shndx = (section != nullptr ? section->GetSectionIndex()
+ : static_cast<Elf_Word>(SHN_ABS));
sym.st_info = (binding << 4) + (type & 0xf);
symbols_.push_back(sym);
}
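
For reference, the SHN_ABS fallback added to Add() above amounts to the following. This is a minimal sketch using the standard <elf.h> types on Linux rather than the templated ElfBuilder API; MakeSymbol and the explicit shndx parameter are illustrative (Elf32_Shdr does not carry its own section index):

#include <elf.h>
#include <cstdint>

// Builds a symbol whose value is either relative to a section or absolute.
// A null section yields SHN_ABS, which lets the JIT emit symbols that
// reference code stored outside the debug ELF file itself.
Elf32_Sym MakeSymbol(Elf32_Word name, const Elf32_Shdr* section, Elf32_Half shndx,
                     Elf32_Addr addr, Elf32_Word size, uint8_t binding, uint8_t type) {
  Elf32_Sym sym = {};
  sym.st_name = name;
  sym.st_value = addr + (section != nullptr ? section->sh_addr : 0);
  sym.st_size = size;
  sym.st_shndx = (section != nullptr) ? shndx : static_cast<Elf32_Half>(SHN_ABS);
  sym.st_info = static_cast<uint8_t>((binding << 4) + (type & 0xf));
  return sym;
}
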
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 2bc8c89..dd50f69 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -22,16 +22,20 @@
#include "base/casts.h"
#include "base/stl_util.h"
#include "compiled_method.h"
-#include "driver/compiler_driver.h"
#include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
#include "dwarf/dedup_vector.h"
#include "dwarf/headers.h"
#include "dwarf/method_debug_info.h"
#include "dwarf/register.h"
#include "elf_builder.h"
+#include "linker/vector_output_stream.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
#include "oat_writer.h"
-#include "utils.h"
#include "stack_map.h"
+#include "utils.h"
namespace art {
namespace dwarf {
@@ -219,6 +223,10 @@
CHECK(format == DW_DEBUG_FRAME_FORMAT || format == DW_EH_FRAME_FORMAT);
typedef typename ElfTypes::Addr Elf_Addr;
+ if (method_infos.empty()) {
+ return;
+ }
+
std::vector<uint32_t> binary_search_table;
std::vector<uintptr_t> patch_locations;
if (format == DW_EH_FRAME_FORMAT) {
@@ -234,7 +242,9 @@
{
cfi_section->Start();
const bool is64bit = Is64BitInstructionSet(builder->GetIsa());
- const Elf_Addr text_address = builder->GetText()->GetAddress();
+ const Elf_Addr text_address = builder->GetText()->Exists()
+ ? builder->GetText()->GetAddress()
+ : 0;
const Elf_Addr cfi_address = cfi_section->GetAddress();
const Elf_Addr cie_address = cfi_address;
Elf_Addr buffer_address = cfi_address;
@@ -305,8 +315,8 @@
struct CompilationUnit {
std::vector<const MethodDebugInfo*> methods_;
size_t debug_line_offset_ = 0;
- uint32_t low_pc_ = 0xFFFFFFFFU;
- uint32_t high_pc_ = 0;
+ uintptr_t low_pc_ = std::numeric_limits<uintptr_t>::max();
+ uintptr_t high_pc_ = 0;
};
typedef std::vector<DexFile::LocalInfo> LocalInfos;
@@ -439,14 +449,17 @@
void Write(const CompilationUnit& compilation_unit) {
CHECK(!compilation_unit.methods_.empty());
- const Elf_Addr text_address = owner_->builder_->GetText()->GetAddress();
+ const Elf_Addr text_address = owner_->builder_->GetText()->Exists()
+ ? owner_->builder_->GetText()->GetAddress()
+ : 0;
+ const uintptr_t cu_size = compilation_unit.high_pc_ - compilation_unit.low_pc_;
info_.StartTag(DW_TAG_compile_unit);
info_.WriteStrp(DW_AT_producer, owner_->WriteString("Android dex2oat"));
info_.WriteData1(DW_AT_language, DW_LANG_Java);
info_.WriteStrp(DW_AT_comp_dir, owner_->WriteString("$JAVA_SRC_ROOT"));
info_.WriteAddr(DW_AT_low_pc, text_address + compilation_unit.low_pc_);
- info_.WriteUdata(DW_AT_high_pc, compilation_unit.high_pc_ - compilation_unit.low_pc_);
+ info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(cu_size));
info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset_);
const char* last_dex_class_desc = nullptr;
@@ -464,8 +477,16 @@
if (last_dex_class_desc != nullptr) {
EndClassTag(last_dex_class_desc);
}
- size_t offset = StartClassTag(dex_class_desc);
- type_cache_.emplace(dex_class_desc, offset);
+ // Write reference tag for the class we are about to declare.
+ size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type);
+ type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset);
+ size_t type_attrib_offset = info_.size();
+ info_.WriteRef4(DW_AT_type, 0);
+ info_.EndTag();
+ // Declare the class that owns this method.
+ size_t class_offset = StartClassTag(dex_class_desc);
+ info_.UpdateUint32(type_attrib_offset, class_offset);
+ info_.WriteFlag(DW_AT_declaration, true);
// Check that each class is defined only once.
bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second;
CHECK(unique) << "Redefinition of " << dex_class_desc;
@@ -476,7 +497,7 @@
info_.StartTag(DW_TAG_subprogram);
WriteName(dex->GetMethodName(dex_method));
info_.WriteAddr(DW_AT_low_pc, text_address + mi->low_pc_);
- info_.WriteUdata(DW_AT_high_pc, mi->high_pc_ - mi->low_pc_);
+ info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(mi->high_pc_ - mi->low_pc_));
uint8_t frame_base[] = { DW_OP_call_frame_cfa };
info_.WriteExprLoc(DW_AT_frame_base, &frame_base, sizeof(frame_base));
WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto));
@@ -562,6 +583,92 @@
owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
}
+ void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+ info_.StartTag(DW_TAG_compile_unit);
+ info_.WriteStrp(DW_AT_producer, owner_->WriteString("Android dex2oat"));
+ info_.WriteData1(DW_AT_language, DW_LANG_Java);
+
+ for (mirror::Class* type : types) {
+ if (type->IsPrimitive()) {
+ // For primitive types the definition and the declaration are the same.
+ if (type->GetPrimitiveType() != Primitive::kPrimVoid) {
+ WriteTypeDeclaration(type->GetDescriptor(nullptr));
+ }
+ } else if (type->IsArrayClass()) {
+ mirror::Class* element_type = type->GetComponentType();
+ uint32_t component_size = type->GetComponentSize();
+ uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
+ uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+ info_.StartTag(DW_TAG_array_type);
+ std::string descriptor_string;
+ WriteLazyType(element_type->GetDescriptor(&descriptor_string));
+ info_.WriteUdata(DW_AT_data_member_location, data_offset);
+ info_.StartTag(DW_TAG_subrange_type);
+ DCHECK_LT(length_offset, 32u);
+ uint8_t count[] = {
+ DW_OP_push_object_address,
+ static_cast<uint8_t>(DW_OP_lit0 + length_offset),
+ DW_OP_plus,
+ DW_OP_deref_size,
+ 4 // Array length is always 32 bits wide.
+ };
+ info_.WriteExprLoc(DW_AT_count, &count, sizeof(count));
+ info_.EndTag(); // DW_TAG_subrange_type.
+ info_.EndTag(); // DW_TAG_array_type.
+ } else {
+ std::string descriptor_string;
+ const char* desc = type->GetDescriptor(&descriptor_string);
+ StartClassTag(desc);
+
+ if (!type->IsVariableSize()) {
+ info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize());
+ }
+
+ // Base class.
+ mirror::Class* base_class = type->GetSuperClass();
+ if (base_class != nullptr) {
+ info_.StartTag(DW_TAG_inheritance);
+ WriteLazyType(base_class->GetDescriptor(&descriptor_string));
+ info_.WriteUdata(DW_AT_data_member_location, 0);
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+ info_.EndTag(); // DW_TAG_inheritance.
+ }
+
+ // Member variables.
+ for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) {
+ ArtField* field = type->GetInstanceField(i);
+ info_.StartTag(DW_TAG_member);
+ WriteName(field->GetName());
+ WriteLazyType(field->GetTypeDescriptor());
+ info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value());
+ uint32_t access_flags = field->GetAccessFlags();
+ if (access_flags & kAccPublic) {
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+ } else if (access_flags & kAccProtected) {
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected);
+ } else if (access_flags & kAccPrivate) {
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+ }
+ info_.EndTag(); // DW_TAG_member.
+ }
+
+ EndClassTag(desc);
+ }
+ }
+
+ CHECK_EQ(info_.Depth(), 1);
+ FinishLazyTypes();
+ info_.EndTag(); // DW_TAG_compile_unit.
+ std::vector<uint8_t> buffer;
+ buffer.reserve(info_.data()->size() + KB);
+ const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
+ const size_t debug_abbrev_offset =
+ owner_->debug_abbrev_.Insert(debug_abbrev_.data(), debug_abbrev_.size());
+ WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
+ owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+ }
+
// Write table into .debug_loc which describes location of dex register.
// The dex register might be valid only at some points and it might
// move between machine registers and stack.
@@ -715,14 +822,14 @@
// just define all types lazily at the end of compilation unit.
void WriteLazyType(const char* type_descriptor) {
if (type_descriptor != nullptr && type_descriptor[0] != 'V') {
- lazy_types_.emplace(type_descriptor, info_.size());
+ lazy_types_.emplace(std::string(type_descriptor), info_.size());
info_.WriteRef4(DW_AT_type, 0);
}
}
void FinishLazyTypes() {
for (const auto& lazy_type : lazy_types_) {
- info_.UpdateUint32(lazy_type.second, WriteType(lazy_type.first));
+ info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first));
}
lazy_types_.clear();
}
@@ -747,30 +854,39 @@
// Convert dex type descriptor to DWARF.
// Returns offset in the compilation unit.
- size_t WriteType(const char* desc) {
+ size_t WriteTypeDeclaration(const std::string& desc) {
+ DCHECK(!desc.empty());
const auto& it = type_cache_.find(desc);
if (it != type_cache_.end()) {
return it->second;
}
size_t offset;
- if (*desc == 'L') {
+ if (desc[0] == 'L') {
// Class type. For example: Lpackage/name;
- offset = StartClassTag(desc);
+ size_t class_offset = StartClassTag(desc.c_str());
info_.WriteFlag(DW_AT_declaration, true);
- EndClassTag(desc);
- } else if (*desc == '[') {
+ EndClassTag(desc.c_str());
+ // Reference to the class type.
+ offset = info_.StartTag(DW_TAG_reference_type);
+ info_.WriteRef(DW_AT_type, class_offset);
+ info_.EndTag();
+ } else if (desc[0] == '[') {
// Array type.
- size_t element_type = WriteType(desc + 1);
- offset = info_.StartTag(DW_TAG_array_type);
+ size_t element_type = WriteTypeDeclaration(desc.substr(1));
+ size_t array_type = info_.StartTag(DW_TAG_array_type);
+ info_.WriteFlag(DW_AT_declaration, true);
info_.WriteRef(DW_AT_type, element_type);
info_.EndTag();
+ offset = info_.StartTag(DW_TAG_reference_type);
+ info_.WriteRef4(DW_AT_type, array_type);
+ info_.EndTag();
} else {
// Primitive types.
const char* name;
uint32_t encoding;
uint32_t byte_size;
- switch (*desc) {
+ switch (desc[0]) {
case 'B':
name = "byte";
encoding = DW_ATE_signed;
@@ -815,7 +931,7 @@
LOG(FATAL) << "Void type should not be encoded";
UNREACHABLE();
default:
- LOG(FATAL) << "Unknown dex type descriptor: " << desc;
+ LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\"";
UNREACHABLE();
}
offset = info_.StartTag(DW_TAG_base_type);
@@ -865,9 +981,10 @@
// Temporary buffer to create and store the entries.
DebugInfoEntryWriter<> info_;
// Cache of already translated type descriptors.
- std::map<const char*, size_t, CStringLess> type_cache_; // type_desc -> definition_offset.
+ std::map<std::string, size_t> type_cache_; // type_desc -> definition_offset.
// 32-bit references which need to be resolved to a type later.
- std::multimap<const char*, size_t, CStringLess> lazy_types_; // type_desc -> patch_offset.
+ // A given type may be used multiple times; therefore we need a multimap.
+ std::multimap<std::string, size_t> lazy_types_; // type_desc -> patch_offset.
};
public:
@@ -883,6 +1000,11 @@
writer.Write(compilation_unit);
}
+ void WriteTypes(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+ CompilationUnitWriter writer(this);
+ writer.Write(types);
+ }
+
void End() {
builder_->GetDebugInfo()->End();
builder_->WritePatches(".debug_info.oat_patches",
@@ -924,7 +1046,9 @@
// Returns the number of bytes written.
size_t WriteCompilationUnit(CompilationUnit& compilation_unit) {
const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
- const Elf_Addr text_address = builder_->GetText()->GetAddress();
+ const Elf_Addr text_address = builder_->GetText()->Exists()
+ ? builder_->GetText()->GetAddress()
+ : 0;
compilation_unit.debug_line_offset_ = builder_->GetDebugLine()->GetSize();
@@ -1102,9 +1226,27 @@
std::vector<uintptr_t> debug_line_patches;
};
+// Get all types loaded by the runtime.
+static std::vector<mirror::Class*> GetLoadedRuntimeTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::vector<mirror::Class*> result;
+ class CollectClasses : public ClassVisitor {
+ public:
+ virtual bool Visit(mirror::Class* klass) {
+ classes_->push_back(klass);
+ return true;
+ }
+ std::vector<mirror::Class*>* classes_;
+ };
+ CollectClasses visitor;
+ visitor.classes_ = &result;
+ Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+ return result;
+}
+
template<typename ElfTypes>
-void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos) {
+static void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
+ bool write_loaded_runtime_types,
+ const ArrayRef<const MethodDebugInfo>& method_infos) {
// Group the methods into compilation units based on source file.
std::vector<CompilationUnit> compilation_units;
const char* last_source_file = nullptr;
@@ -1122,7 +1264,7 @@
}
// Write .debug_line section.
- {
+ if (!compilation_units.empty()) {
DebugLineWriter<ElfTypes> line_writer(builder);
line_writer.Start();
for (auto& compilation_unit : compilation_units) {
@@ -1132,12 +1274,19 @@
}
// Write .debug_info section.
- {
+ if (!compilation_units.empty() || write_loaded_runtime_types) {
DebugInfoWriter<ElfTypes> info_writer(builder);
info_writer.Start();
for (const auto& compilation_unit : compilation_units) {
info_writer.WriteCompilationUnit(compilation_unit);
}
+ if (write_loaded_runtime_types) {
+ Thread* self = Thread::Current();
+ // The lock prevents the classes from being moved by the GC.
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ std::vector<mirror::Class*> types = GetLoadedRuntimeTypes();
+ info_writer.WriteTypes(ArrayRef<mirror::Class*>(types.data(), types.size()));
+ }
info_writer.End();
}
}
@@ -1173,11 +1322,13 @@
name += " [DEDUPED]";
}
+ const auto* text = builder->GetText()->Exists() ? builder->GetText() : nullptr;
+ const bool is_relative = (text != nullptr);
uint32_t low_pc = info.low_pc_;
// Add in code delta, e.g., thumb bit 0 for Thumb2 code.
low_pc += info.compiled_method_->CodeDelta();
- symtab->Add(strtab->Write(name), builder->GetText(), low_pc,
- true, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
+ symtab->Add(strtab->Write(name), text, low_pc,
+ is_relative, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
// Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2
// instructions, so that disassembler tools can correctly disassemble.
@@ -1185,8 +1336,8 @@
// requires it to match function symbol. Just address 0 does not work.
if (info.compiled_method_->GetInstructionSet() == kThumb2) {
if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) {
- symtab->Add(strtab->Write("$t"), builder->GetText(), info.low_pc_ & ~1,
- true, 0, STB_LOCAL, STT_NOTYPE);
+ symtab->Add(strtab->Write("$t"), text, info.low_pc_ & ~1,
+ is_relative, 0, STB_LOCAL, STT_NOTYPE);
generated_mapping_symbol = true;
}
}
@@ -1202,25 +1353,89 @@
template <typename ElfTypes>
void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format) {
- if (!method_infos.empty()) {
- // Add methods to .symtab.
- WriteDebugSymbols(builder, method_infos);
- // Generate CFI (stack unwinding information).
- WriteCFISection(builder, method_infos, cfi_format);
- // Write DWARF .debug_* sections.
- WriteDebugSections(builder, method_infos);
+ // Add methods to .symtab.
+ WriteDebugSymbols(builder, method_infos);
+ // Generate CFI (stack unwinding information).
+ WriteCFISection(builder, method_infos, cfi_format);
+ // Write DWARF .debug_* sections.
+ WriteDebugSections(builder, write_loaded_runtime_types, method_infos);
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForMethodInternal(
+ const dwarf::MethodDebugInfo& method_info) {
+ const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
+ std::vector<uint8_t> buffer;
+ buffer.reserve(KB);
+ VectorOutputStream out("Debug ELF file", &buffer);
+ std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+ builder->Start();
+ WriteDebugInfo(builder.get(),
+ false,
+ ArrayRef<const MethodDebugInfo>(&method_info, 1),
+ DW_DEBUG_FRAME_FORMAT);
+ builder->End();
+ CHECK(builder->Good());
+ // Make a copy of the buffer. We want to shrink it anyway.
+ uint8_t* result = new uint8_t[buffer.size()];
+ CHECK(result != nullptr);
+ memcpy(result, buffer.data(), buffer.size());
+ return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info) {
+ const InstructionSet isa = method_info.compiled_method_->GetInstructionSet();
+ if (Is64BitInstructionSet(isa)) {
+ return WriteDebugElfFileForMethodInternal<ElfTypes64>(method_info);
+ } else {
+ return WriteDebugElfFileForMethodInternal<ElfTypes32>(method_info);
+ }
+}
+
+template <typename ElfTypes>
+static ArrayRef<const uint8_t> WriteDebugElfFileForClassInternal(const InstructionSet isa,
+ mirror::Class* type)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::vector<uint8_t> buffer;
+ buffer.reserve(KB);
+ VectorOutputStream out("Debug ELF file", &buffer);
+ std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
+ builder->Start();
+
+ DebugInfoWriter<ElfTypes> info_writer(builder.get());
+ info_writer.Start();
+ info_writer.WriteTypes(ArrayRef<mirror::Class*>(&type, 1));
+ info_writer.End();
+
+ builder->End();
+ CHECK(builder->Good());
+ // Make a copy of the buffer. We want to shrink it anyway.
+ uint8_t* result = new uint8_t[buffer.size()];
+ CHECK(result != nullptr);
+ memcpy(result, buffer.data(), buffer.size());
+ return ArrayRef<const uint8_t>(result, buffer.size());
+}
+
+ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type) {
+ if (Is64BitInstructionSet(isa)) {
+ return WriteDebugElfFileForClassInternal<ElfTypes64>(isa, type);
+ } else {
+ return WriteDebugElfFileForClassInternal<ElfTypes32>(isa, type);
}
}
// Explicit instantiations
template void WriteDebugInfo<ElfTypes32>(
ElfBuilder<ElfTypes32>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
template void WriteDebugInfo<ElfTypes64>(
ElfBuilder<ElfTypes64>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
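
The WriteLazyType()/FinishLazyTypes() pair above implements a record-placeholder-then-patch scheme over the .debug_info buffer: every DW_AT_type reference is first written as a zero placeholder, and all placeholders are patched once the referenced declarations exist. A self-contained sketch of that scheme, with hypothetical names (LazyTypePatcher, DeclareType) standing in for the real DebugInfoEntryWriter API:

#include <cstdint>
#include <cstring>
#include <map>
#include <string>
#include <vector>

class LazyTypePatcher {
 public:
  // Emits a 4-byte placeholder reference and remembers where to patch it.
  void WriteLazyType(const std::string& desc) {
    lazy_types_.emplace(desc, buffer_.size());
    buffer_.insert(buffer_.end(), 4, 0);
  }

  // Patches all recorded references once their declarations can be emitted.
  void FinishLazyTypes() {
    for (const auto& lazy : lazy_types_) {
      uint32_t offset = DeclareType(lazy.first);
      memcpy(&buffer_[lazy.second], &offset, sizeof(offset));
    }
    lazy_types_.clear();
  }

 private:
  // Emits the declaration for `desc` once, caching its offset for reuse.
  uint32_t DeclareType(const std::string& desc) {
    auto it = type_cache_.find(desc);
    if (it != type_cache_.end()) {
      return it->second;
    }
    uint32_t offset = static_cast<uint32_t>(buffer_.size());
    // ... emit the actual DW_TAG_* entry for `desc` here ...
    type_cache_.emplace(desc, offset);
    return offset;
  }

  std::vector<uint8_t> buffer_;
  std::multimap<std::string, size_t> lazy_types_;  // A type may be referenced many times.
  std::map<std::string, uint32_t> type_cache_;     // Each type is declared only once.
};

This is presumably also why the patch switches both maps from const char* keys with CStringLess to std::string keys: descriptors such as desc.substr(1) are temporaries with no stable backing storage to point at.
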
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 7ec0be1..91da00f 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -17,19 +17,30 @@
#ifndef ART_COMPILER_ELF_WRITER_DEBUG_H_
#define ART_COMPILER_ELF_WRITER_DEBUG_H_
-#include "elf_builder.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "dwarf/dwarf_constants.h"
-#include "oat_writer.h"
+#include "elf_builder.h"
#include "utils/array_ref.h"
namespace art {
+namespace mirror {
+class Class;
+}
namespace dwarf {
+struct MethodDebugInfo;
template <typename ElfTypes>
void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+ bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
+ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info);
+
+ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
} // namespace dwarf
} // namespace art
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 7b1bdd7..a67f3bd 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -152,7 +152,7 @@
void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) {
if (compiler_options_->GetGenerateDebugInfo()) {
- dwarf::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat);
+ dwarf::WriteDebugInfo(builder_.get(), /* write_types */ true, method_infos, kCFIFormat);
}
}
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dc75ff1..d710747 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1142,7 +1142,7 @@
loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
SideEffects loop_effects = side_effects_.GetLoopEffects(loop->GetHeader());
if (!array_get->GetSideEffects().MayDependOn(loop_effects)) {
- HoistToPreheaderOrDeoptBlock(loop, array_get);
+ HoistToPreHeaderOrDeoptBlock(loop, array_get);
}
}
}
@@ -1280,7 +1280,8 @@
// as runtime test. By restricting dynamic bce to unit strides (with a maximum of 32-bit
// iterations) and by not combining access (e.g. a[i], a[i-3], a[i+5] etc.), these tests
// correctly guard against any possible OOB (including arithmetic wrap-around cases).
- HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ HBasicBlock* block = GetPreHeader(loop, instruction);
induction_range_.GenerateRangeCode(instruction, index, GetGraph(), block, &lower, &upper);
if (lower != nullptr) {
InsertDeopt(loop, block, new (GetGraph()->GetArena()) HAbove(lower, upper));
@@ -1353,7 +1354,7 @@
return true;
} else if (length->IsArrayLength() && length->GetBlock()->GetLoopInformation() == loop) {
if (CanHandleNullCheck(loop, length->InputAt(0), needs_taken_test)) {
- HoistToPreheaderOrDeoptBlock(loop, length);
+ HoistToPreHeaderOrDeoptBlock(loop, length);
return true;
}
}
@@ -1371,7 +1372,8 @@
HInstruction* array = check->InputAt(0);
if (loop->IsDefinedOutOfTheLoop(array)) {
// Generate: if (array == null) deoptimize;
- HBasicBlock* block = TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
+ HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
InsertDeopt(loop, block, cond);
@@ -1418,6 +1420,28 @@
return true;
}
+ /**
+ * Returns the appropriate preheader for the loop, depending on whether the
+ * instruction appears in the loop header or in the loop body proper.
+ */
+ HBasicBlock* GetPreHeader(HLoopInformation* loop, HInstruction* instruction) {
+ // Use the preheader unless there is an earlier generated deoptimization block, since
+ // hoisted expressions may depend on and/or be used by the deoptimization tests.
+ HBasicBlock* header = loop->GetHeader();
+ const uint32_t loop_id = header->GetBlockId();
+ auto it = taken_test_loop_.find(loop_id);
+ if (it != taken_test_loop_.end()) {
+ HBasicBlock* block = it->second;
+ // If always taken, keep it that way by returning the original preheader,
+ // which can be found by following the predecessor of the true-block twice.
+ if (instruction->GetBlock() == header) {
+ return block->GetSinglePredecessor()->GetSinglePredecessor();
+ }
+ return block;
+ }
+ return loop->GetPreHeader();
+ }
+
/** Inserts a deoptimization test. */
void InsertDeopt(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
HInstruction* suspend = loop->GetSuspendCheck();
@@ -1432,28 +1456,17 @@
}
/** Hoists instruction out of the loop to preheader or deoptimization block. */
- void HoistToPreheaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
- // Use preheader unless there is an earlier generated deoptimization block since
- // hoisted expressions may depend on and/or used by the deoptimization tests.
- const uint32_t loop_id = loop->GetHeader()->GetBlockId();
- HBasicBlock* preheader = loop->GetPreHeader();
- HBasicBlock* block = preheader;
- auto it = taken_test_loop_.find(loop_id);
- if (it != taken_test_loop_.end()) {
- block = it->second;
- }
- // Hoist the instruction.
+ void HoistToPreHeaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
+ HBasicBlock* block = GetPreHeader(loop, instruction);
DCHECK(!instruction->HasEnvironment());
instruction->MoveBefore(block->GetLastInstruction());
}
/**
- * Adds a new taken-test structure to a loop if needed (and not already done).
+ * Adds a new taken-test structure to a loop if needed and not already done.
* The taken-test protects range analysis evaluation code to avoid any
* deoptimization caused by incorrect trip-count evaluation in non-taken loops.
*
- * Returns block in which deoptimizations/invariants can be put.
- *
* old_preheader
* |
* if_block <- taken-test protects deoptimization block
@@ -1485,16 +1498,11 @@
* array[i] = 0;
* }
*/
- HBasicBlock* TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) {
- // Not needed (can use preheader), or already done (can reuse)?
+ void TransformLoopForDeoptimizationIfNeeded(HLoopInformation* loop, bool needs_taken_test) {
+ // Not needed (can use preheader) or already done (can reuse)?
const uint32_t loop_id = loop->GetHeader()->GetBlockId();
- if (!needs_taken_test) {
- return loop->GetPreHeader();
- } else {
- auto it = taken_test_loop_.find(loop_id);
- if (it != taken_test_loop_.end()) {
- return it->second;
- }
+ if (!needs_taken_test || taken_test_loop_.find(loop_id) != taken_test_loop_.end()) {
+ return;
}
// Generate top test structure.
@@ -1523,7 +1531,6 @@
if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
taken_test_loop_.Put(loop_id, true_block);
- return true_block;
}
/**
@@ -1538,7 +1545,7 @@
* \ /
* x_1 = phi(x_0, null) <- synthetic phi
* |
- * header
+ * new_preheader
*/
void InsertPhiNodes() {
// Scan all new deoptimization blocks.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 950043e..5958cd8 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -613,7 +613,7 @@
ArenaVector<SlowPathCode*> slow_paths_;
- // The current slow path that we're generating code for.
+ // The current slow-path that we're generating code for.
SlowPathCode* current_slow_path_;
// The current block index in `block_order_` of the block
@@ -674,6 +674,122 @@
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
+/**
+ * A templated class SlowPathGenerator with a templated method NewSlowPath()
+ * that can be used by any code generator to share equivalent slow-paths with
+ * the objective of reducing generated code size.
+ *
+ * InstructionType: instruction that requires SlowPathCodeType
+ * SlowPathCodeType: subclass of SlowPathCode, with constructor SlowPathCodeType(InstructionType *)
+ */
+template <typename InstructionType>
+class SlowPathGenerator {
+ static_assert(std::is_base_of<HInstruction, InstructionType>::value,
+ "InstructionType is not a subclass of art::HInstruction");
+
+ public:
+ SlowPathGenerator(HGraph* graph, CodeGenerator* codegen)
+ : graph_(graph),
+ codegen_(codegen),
+ slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {}
+
+ // Creates and adds a new slow-path, if needed, or returns an existing one otherwise.
+ // Templating the method (rather than the whole class) on the slow-path type enables
+ // keeping this code in a generic, non-architecture-specific place.
+ //
+ // NOTE: This approach assumes each InstructionType only generates one SlowPathCodeType.
+ // To relax this requirement, we would need some RTTI on the stored slow-paths,
+ // or template the class as a whole on SlowPathType.
+ template <typename SlowPathCodeType>
+ SlowPathCodeType* NewSlowPath(InstructionType* instruction) {
+ static_assert(std::is_base_of<SlowPathCode, SlowPathCodeType>::value,
+ "SlowPathCodeType is not a subclass of art::SlowPathCode");
+ static_assert(std::is_constructible<SlowPathCodeType, InstructionType*>::value,
+ "SlowPathCodeType is not constructible from InstructionType*");
+ // Iterate over potential candidates for sharing. Currently, only same-typed
+ // slow-paths with exactly the same dex-pc are viable candidates.
+ // TODO: pass dex-pc/slow-path-type to run-time to allow even more sharing?
+ const uint32_t dex_pc = instruction->GetDexPc();
+ auto iter = slow_path_map_.find(dex_pc);
+ if (iter != slow_path_map_.end()) {
+ auto candidates = iter->second;
+ for (const auto& it : candidates) {
+ InstructionType* other_instruction = it.first;
+ SlowPathCodeType* other_slow_path = down_cast<SlowPathCodeType*>(it.second);
+ // Determine if the instructions allow for slow-path sharing.
+ if (HaveSameLiveRegisters(instruction, other_instruction) &&
+ HaveSameStackMap(instruction, other_instruction)) {
+ // Can share: reuse existing one.
+ return other_slow_path;
+ }
+ }
+ } else {
+ // First time this dex-pc is seen.
+ iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}});
+ }
+ // Cannot share: create and add a new slow-path for this particular dex-pc.
+ SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction);
+ iter->second.emplace_back(std::make_pair(instruction, slow_path));
+ codegen_->AddSlowPath(slow_path);
+ return slow_path;
+ }
+
+ private:
+ // Tests if both instructions have the same set of live physical registers. This ensures
+ // the slow-path has exactly the same preamble when saving these registers to the stack.
+ bool HaveSameLiveRegisters(const InstructionType* i1, const InstructionType* i2) const {
+ const uint32_t core_spill = ~codegen_->GetCoreSpillMask();
+ const uint32_t fpu_spill = ~codegen_->GetFpuSpillMask();
+ RegisterSet* live1 = i1->GetLocations()->GetLiveRegisters();
+ RegisterSet* live2 = i2->GetLocations()->GetLiveRegisters();
+ return (((live1->GetCoreRegisters() & core_spill) ==
+ (live2->GetCoreRegisters() & core_spill)) &&
+ ((live1->GetFloatingPointRegisters() & fpu_spill) ==
+ (live2->GetFloatingPointRegisters() & fpu_spill)));
+ }
+
+ // Tests if both instructions have the same stack map. This ensures the interpreter
+ // will find exactly the same dex-registers at the same entries.
+ bool HaveSameStackMap(const InstructionType* i1, const InstructionType* i2) const {
+ DCHECK(i1->HasEnvironment());
+ DCHECK(i2->HasEnvironment());
+ // We conservatively test if the two instructions find exactly the same instruction
+ // and location in each dex-register. This guarantees they will have the same stack map.
+ HEnvironment* e1 = i1->GetEnvironment();
+ HEnvironment* e2 = i2->GetEnvironment();
+ if (e1->GetParent() != e2->GetParent() || e1->Size() != e2->Size()) {
+ return false;
+ }
+ for (size_t i = 0, sz = e1->Size(); i < sz; ++i) {
+ if (e1->GetInstructionAt(i) != e2->GetInstructionAt(i) ||
+ !e1->GetLocationAt(i).Equals(e2->GetLocationAt(i))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ HGraph* const graph_;
+ CodeGenerator* const codegen_;
+
+ // Map from dex-pc to vector of already existing instruction/slow-path pairs.
+ ArenaSafeMap<uint32_t, ArenaVector<std::pair<InstructionType*, SlowPathCode*>>> slow_path_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathGenerator);
+};
+
+class InstructionCodeGenerator : public HGraphVisitor {
+ public:
+ InstructionCodeGenerator(HGraph* graph, CodeGenerator* codegen)
+ : HGraphVisitor(graph),
+ deopt_slow_paths_(graph, codegen) {}
+
+ protected:
+ // Add a slow-path generator for each instruction/slow-path combination that desires sharing.
+ // TODO: under the current regime, only deopt sharing makes sense; extend later.
+ SlowPathGenerator<HDeoptimize> deopt_slow_paths_;
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
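
The per-backend VisitDeoptimize() changes below all funnel through NewSlowPath(). Stripped of templates and ART types, the sharing logic reduces to the sketch that follows; Instr and SlowPath are simplified stand-ins, the single live_regs comparison stands in for both HaveSameLiveRegisters() and HaveSameStackMap(), and plain new replaces ART's arena allocation for brevity:

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

struct Instr { uint32_t dex_pc; uint64_t live_regs; };
struct SlowPath {};

class SlowPathSharer {
 public:
  // Returns an existing slow path for an equivalent instruction at the same
  // dex pc, or creates a new one when no candidate is compatible.
  SlowPath* NewSlowPath(Instr* instr) {
    auto& candidates = map_[instr->dex_pc];
    for (const auto& entry : candidates) {
      if (entry.first->live_regs == instr->live_regs) {
        return entry.second;  // Can share: reuse the existing slow path.
      }
    }
    SlowPath* path = new SlowPath();
    candidates.emplace_back(instr, path);
    return path;
  }

 private:
  // dex pc -> already-created (instruction, slow path) pairs.
  std::map<uint32_t, std::vector<std::pair<Instr*, SlowPath*>>> map_;
};
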
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 24f06a3..45520b4 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -350,24 +350,24 @@
class DeoptimizationSlowPathARM : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathARM(HInstruction* instruction)
+ explicit DeoptimizationSlowPathARM(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
@@ -913,7 +913,7 @@
}
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1414,17 +1414,6 @@
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
- ShifterOperand operand;
- if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
- __ cmp(left, operand);
- } else {
- Register temp = IP;
- __ LoadImmediate(temp, right);
- __ cmp(left, ShifterOperand(temp));
- }
-}
-
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
@@ -1490,7 +1479,7 @@
int32_t val_low = Low32Bits(value);
int32_t val_high = High32Bits(value);
- GenerateCompareWithImmediate(left_high, val_high);
+ __ CmpConstant(left_high, val_high);
if (if_cond == kCondNE) {
__ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
@@ -1500,7 +1489,7 @@
__ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
- GenerateCompareWithImmediate(left_low, val_low);
+ __ CmpConstant(left_low, val_low);
} else {
Register right_high = right.AsRegisterPairHigh<Register>();
Register right_low = right.AsRegisterPairLow<Register>();
@@ -1624,7 +1613,7 @@
__ cmp(left, ShifterOperand(right.AsRegister<Register>()));
} else {
DCHECK(right.IsConstant());
- GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ __ CmpConstant(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
if (true_target == nullptr) {
__ b(false_target, ARMCondition(condition->GetOppositeCondition()));
@@ -1666,8 +1655,7 @@
}
void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DeoptimizationSlowPathARM(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -1735,8 +1723,8 @@
__ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
} else {
DCHECK(right.IsConstant());
- GenerateCompareWithImmediate(left.AsRegister<Register>(),
- CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ __ CmpConstant(left.AsRegister<Register>(),
+ CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
__ it(ARMCondition(cond->GetCondition()), kItElse);
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
@@ -6553,7 +6541,7 @@
}
if (num_entries - last_index == 2) {
// The last missing case_value.
- GenerateCompareWithImmediate(temp_reg, 1);
+ __ CmpConstant(temp_reg, 1);
__ b(codegen_->GetLabelOf(successors[last_index + 1]), EQ);
}
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index f9c49a5..26d6d63 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -188,7 +188,7 @@
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
};
-class InstructionCodeGeneratorARM : public HGraphVisitor {
+class InstructionCodeGeneratorARM : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
@@ -273,7 +273,6 @@
size_t condition_input_index,
Label* true_target,
Label* false_target);
- void GenerateCompareWithImmediate(Register left, int32_t right);
void GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target,
Label* false_target);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1ad487d..a3150d3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -477,24 +477,24 @@
class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
public:
- explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};
@@ -1605,7 +1605,7 @@
InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
CodeGeneratorARM64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -2939,9 +2939,8 @@
}
void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathARM64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeARM64* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 0e90ac6..f2ff894 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -186,7 +186,7 @@
DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
};
-class InstructionCodeGeneratorARM64 : public HGraphVisitor {
+class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 7bc0635..75f5fb3 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -444,19 +444,16 @@
class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit DeoptimizationSlowPathMIPS(HInstruction* instruction)
+ explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
instruction_,
- dex_pc,
+ instruction_->GetDexPc(),
this,
IsDirectEntrypoint(kQuickDeoptimize));
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
@@ -465,7 +462,7 @@
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
};
@@ -1241,7 +1238,7 @@
InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
CodeGeneratorMIPS* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1511,7 +1508,7 @@
}
void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
Primitive::Type type = instr->GetResultType();
@@ -1534,7 +1531,7 @@
static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = instr->GetLocations();
Primitive::Type type = instr->GetType();
@@ -1544,28 +1541,49 @@
int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
uint32_t shift_value = rhs_imm & shift_mask;
- // Is the INS (Insert Bit Field) instruction supported?
- bool has_ins = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+ // Are the INS (Insert Bit Field) and ROTR instructions supported?
+ bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
switch (type) {
case Primitive::kPrimInt: {
Register dst = locations->Out().AsRegister<Register>();
Register lhs = locations->InAt(0).AsRegister<Register>();
if (use_imm) {
- if (instr->IsShl()) {
+ if (shift_value == 0) {
+ if (dst != lhs) {
+ __ Move(dst, lhs);
+ }
+ } else if (instr->IsShl()) {
__ Sll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Sra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst, lhs, shift_value);
+ } else {
+ if (has_ins_rotr) {
+ __ Rotr(dst, lhs, shift_value);
+ } else {
+ __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
+ __ Srl(dst, lhs, shift_value);
+ __ Or(dst, dst, TMP);
+ }
}
} else {
if (instr->IsShl()) {
__ Sllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Srav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst, lhs, rhs_reg);
+ } else {
+ if (has_ins_rotr) {
+ __ Rotrv(dst, lhs, rhs_reg);
+ } else {
+ __ Subu(TMP, ZERO, rhs_reg);
+ __ Sllv(TMP, lhs, TMP);
+ __ Srlv(dst, lhs, rhs_reg);
+ __ Or(dst, dst, TMP);
+ }
}
}
break;
@@ -1580,7 +1598,7 @@
if (shift_value == 0) {
codegen_->Move64(locations->Out(), locations->InAt(0));
} else if (shift_value < kMipsBitsPerWord) {
- if (has_ins) {
+ if (has_ins_rotr) {
if (instr->IsShl()) {
__ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
__ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
@@ -1589,10 +1607,15 @@
__ Srl(dst_low, lhs_low, shift_value);
__ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
__ Sra(dst_high, lhs_high, shift_value);
+ } else if (instr->IsUShr()) {
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+ __ Srl(dst_high, lhs_high, shift_value);
} else {
__ Srl(dst_low, lhs_low, shift_value);
__ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
__ Srl(dst_high, lhs_high, shift_value);
+ __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
}
} else {
if (instr->IsShl()) {
@@ -1605,11 +1628,18 @@
__ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
__ Srl(dst_low, lhs_low, shift_value);
__ Or(dst_low, dst_low, TMP);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst_high, lhs_high, shift_value);
__ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
__ Srl(dst_low, lhs_low, shift_value);
__ Or(dst_low, dst_low, TMP);
+ } else {
+ __ Srl(TMP, lhs_low, shift_value);
+ __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ __ Srl(TMP, lhs_high, shift_value);
+ __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Or(dst_high, dst_high, TMP);
}
}
} else {
@@ -1620,9 +1650,29 @@
} else if (instr->IsShr()) {
__ Sra(dst_low, lhs_high, shift_value);
__ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst_low, lhs_high, shift_value);
__ Move(dst_high, ZERO);
+ } else {
+ if (shift_value == 0) {
+ // 64-bit rotation by 32 is just a swap.
+ __ Move(dst_low, lhs_high);
+ __ Move(dst_high, lhs_low);
+ } else {
+ if (has_ins_rotr) {
+ __ Srl(dst_low, lhs_high, shift_value);
+ __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
+ __ Srl(dst_high, lhs_low, shift_value);
+ __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
+ } else {
+ __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_high, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_high, lhs_low, shift_value);
+ __ Or(dst_high, dst_high, TMP);
+ }
+ }
}
}
} else {
@@ -1649,7 +1699,7 @@
__ Beqz(TMP, &done);
__ Move(dst_low, dst_high);
__ Sra(dst_high, dst_high, 31);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst_high, lhs_high, rhs_reg);
__ Nor(AT, ZERO, rhs_reg);
__ Sll(TMP, lhs_high, 1);
@@ -1660,6 +1710,21 @@
__ Beqz(TMP, &done);
__ Move(dst_low, dst_high);
__ Move(dst_high, ZERO);
+ } else {
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Srlv(TMP, lhs_low, rhs_reg);
+ __ Sll(dst_low, lhs_high, 1);
+ __ Sllv(dst_low, dst_low, AT);
+ __ Or(dst_low, dst_low, TMP);
+ __ Srlv(TMP, lhs_high, rhs_reg);
+ __ Sll(dst_high, lhs_low, 1);
+ __ Sllv(dst_high, dst_high, AT);
+ __ Or(dst_high, dst_high, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(TMP, dst_high);
+ __ Move(dst_high, dst_low);
+ __ Move(dst_low, TMP);
}
__ Bind(&done);
}
@@ -3357,8 +3422,8 @@
}
void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DeoptimizationSlowPathMIPS(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeMIPS* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -4539,14 +4604,12 @@
codegen_->GenerateFrameExit();
}
-void LocationsBuilderMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void LocationsBuilderMIPS::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
-void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
void LocationsBuilderMIPS::VisitShl(HShl* shl) {
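
The pre-R2 fallback sequences above synthesize rotate-right from two shifts and an OR, and the 64-bit register-pair cases reduce to the same math on a 64-bit value. A sketch of the semantics being implemented (plain C++, not ART code; the zero-shift case is handled separately just as the generated code does, since a full-width shift is undefined in C++ and not encodable in the MIPS shift field):

#include <cstdint>

// 32-bit rotate right without a native ROTR (MIPS32 pre-R2):
// dst = (lhs >> n) | (lhs << (32 - n)).
uint32_t RotateRight32(uint32_t lhs, uint32_t shift) {
  uint32_t n = shift & 31;
  if (n == 0) {
    return lhs;  // Matches the Move(dst, lhs) fast path for shift_value == 0.
  }
  return (lhs >> n) | (lhs << (32 - n));
}

// 64-bit rotate right built from a register pair. A rotate by exactly 32
// degenerates to swapping the two halves, as the code above notes.
uint64_t RotateRight64(uint32_t lo, uint32_t hi, uint32_t shift) {
  uint64_t value = (static_cast<uint64_t>(hi) << 32) | lo;
  uint32_t n = shift & 63;
  if (n == 0) {
    return value;
  }
  return (value >> n) | (value << (64 - n));
}
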
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 38302ad..c3d4851 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -197,7 +197,7 @@
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
};
-class InstructionCodeGeneratorMIPS : public HGraphVisitor {
+class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7682ca7..abfaae4 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -391,24 +391,24 @@
class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};
@@ -1113,7 +1113,7 @@
InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
CodeGeneratorMIPS64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1247,7 +1247,7 @@
}
void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
Primitive::Type type = instr->GetResultType();
@@ -1265,7 +1265,7 @@
}
void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
- DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
LocationSummary* locations = instr->GetLocations();
Primitive::Type type = instr->GetType();
@@ -1290,13 +1290,19 @@
? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
: static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
- if (type == Primitive::kPrimInt) {
+ if (shift_value == 0) {
+ if (dst != lhs) {
+ __ Move(dst, lhs);
+ }
+ } else if (type == Primitive::kPrimInt) {
if (instr->IsShl()) {
__ Sll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Sra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Srl(dst, lhs, shift_value);
+ } else {
+ __ Rotr(dst, lhs, shift_value);
}
} else {
if (shift_value < 32) {
@@ -1304,8 +1310,10 @@
__ Dsll(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Dsra(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrl(dst, lhs, shift_value);
+ } else {
+ __ Drotr(dst, lhs, shift_value);
}
} else {
shift_value -= 32;
@@ -1313,8 +1321,10 @@
__ Dsll32(dst, lhs, shift_value);
} else if (instr->IsShr()) {
__ Dsra32(dst, lhs, shift_value);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrl32(dst, lhs, shift_value);
+ } else {
+ __ Drotr32(dst, lhs, shift_value);
}
}
}
@@ -1324,16 +1334,20 @@
__ Sllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Srav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Srlv(dst, lhs, rhs_reg);
+ } else {
+ __ Rotrv(dst, lhs, rhs_reg);
}
} else {
if (instr->IsShl()) {
__ Dsllv(dst, lhs, rhs_reg);
} else if (instr->IsShr()) {
__ Dsrav(dst, lhs, rhs_reg);
- } else {
+ } else if (instr->IsUShr()) {
__ Dsrlv(dst, lhs, rhs_reg);
+ } else {
+ __ Drotrv(dst, lhs, rhs_reg);
}
}
}
@@ -2735,9 +2749,8 @@
}
void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathMIPS64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeMIPS64* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -3725,14 +3738,12 @@
codegen_->GenerateFrameExit();
}
-void LocationsBuilderMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void LocationsBuilderMIPS64::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
-void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror ATTRIBUTE_UNUSED) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
+void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror) {
+ HandleShift(ror);
}
void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
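
With HandleShift now accepting HRor, the new shift_value == 0 arm above matters: a rotation (or shift) by zero is the identity, so the codegen simply moves lhs into dst instead of emitting a rotate. A minimal sketch of the semantics the Rotr path implements (hypothetical helper, not ART code):

    #include <cstdint>

    // Right-rotate semantics of the 32-bit Rotr instruction; the s == 0
    // guard avoids the undefined C++ shift by 32 and mirrors the codegen's
    // explicit zero-shift move.
    uint32_t Rotr32(uint32_t x, uint32_t s) {
      s &= 31;
      return (s == 0) ? x : (x >> s) | (x << (32 - s));
    }
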
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 60ff96d..7182e8e 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -201,7 +201,7 @@
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
};
-class InstructionCodeGeneratorMIPS64 : public HGraphVisitor {
+class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4a0c2f4..c24d258 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -365,11 +365,10 @@
class DeoptimizationSlowPathX86 : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathX86(HInstruction* instruction)
+ explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- DCHECK(instruction_->IsDeoptimize());
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
@@ -383,7 +382,7 @@
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
};
@@ -892,7 +891,7 @@
}
InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1611,9 +1610,7 @@
}
void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathX86(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index df73476..c65c423 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -178,7 +178,7 @@
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
};
-class InstructionCodeGeneratorX86 : public HGraphVisitor {
+class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ec62d84..294b40e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -387,18 +387,16 @@
class DeoptimizationSlowPathX86_64 : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathX86_64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- deoptimize,
- deoptimize->GetDexPc(),
+ instruction_,
+ instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -406,7 +404,7 @@
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
};
@@ -1000,7 +998,7 @@
InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph,
CodeGeneratorX86_64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1594,9 +1592,7 @@
}
void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathX86_64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index c5e8a04..505c9dc 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -183,7 +183,7 @@
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};
-class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
+class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index c504ded..b90afb1 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -211,19 +211,6 @@
// Try to replace a binary operation flanked by one UShr and one Shl with a bitfield rotation.
bool InstructionSimplifierVisitor::TryReplaceWithRotate(HBinaryOperation* op) {
- // This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS/64.
- const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- switch (instruction_set) {
- case kArm:
- case kArm64:
- case kThumb2:
- case kX86:
- case kX86_64:
- break;
- default:
- return false;
- }
DCHECK(op->IsAdd() || op->IsXor() || op->IsOr());
HInstruction* left = op->GetLeft();
HInstruction* right = op->GetRight();
@@ -1261,19 +1248,6 @@
void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke, bool is_left) {
DCHECK(invoke->IsInvokeStaticOrDirect());
DCHECK_EQ(invoke->GetOriginalInvokeType(), InvokeType::kStatic);
- // This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS/64.
- const InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- switch (instruction_set) {
- case kArm:
- case kArm64:
- case kThumb2:
- case kX86:
- case kX86_64:
- break;
- default:
- return;
- }
HInstruction* value = invoke->InputAt(0);
HInstruction* distance = invoke->InputAt(1);
// Replace the invoke with an HRor.
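
Dropping the instruction-set switch makes the rotate recognition unconditional now that every backend handles HRor. For reference, the idiom being matched is sketched below; the Or may equally be an Add or Xor because the two shifted halves occupy disjoint bits (a small self-contained check, not the IR matcher itself):

    #include <cassert>
    #include <cstdint>

    // The pattern matched: a UShr and a Shl of the same value whose shift
    // amounts sum to the register width. Or, Add and Xor all combine the
    // halves identically because their set bits never overlap.
    int main() {
      uint32_t x = 0x12345678u;
      for (uint32_t d = 1; d < 32; ++d) {
        uint32_t ushr = x >> d;
        uint32_t shl = x << (32 - d);
        assert((ushr | shl) == (ushr + shl));  // disjoint bits
        assert((ushr | shl) == (ushr ^ shl));
      }
      return 0;
    }
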
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 1e6b3a1..b1fbf28 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -847,6 +847,9 @@
}
// Prevent reordering with prior memory operations.
+ // Emit a DMB ISH instruction instead of a DMB ISHST one, as the

+ // latter allows a preceding load to be delayed past the STXR
+ // instruction below.
__ dmb(ISH);
__ add(tmp_ptr, base, ShifterOperand(offset));
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index f723940..81cab86 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1035,7 +1035,11 @@
__ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
__ Cbnz(tmp_32, &loop_head);
} else {
- __ Dmb(InnerShareable, BarrierWrites);
+ // Emit a `Dmb(InnerShareable, BarrierAll)` (DMB ISH) instruction
+ // instead of a `Dmb(InnerShareable, BarrierWrites)` (DMB ISHST)
+ // one, as the latter allows a preceding load to be delayed past
+ // the STXR instruction below.
+ __ Dmb(InnerShareable, BarrierAll);
__ Bind(&loop_head);
// TODO: When `type == Primitive::kPrimNot`, add a read barrier for
// the reference stored in the object before attempting the CAS,
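
The same point in portable terms, as a rough sketch (illustrative only; the intrinsic emits its barriers directly): C++ offers no store-store-only fence precisely because publication sequences also need earlier loads pinned, and a release fence is typically compiled to a full dmb ish on ARM, not dmb ishst.

    #include <atomic>

    // A release fence orders *both* prior loads and prior stores before
    // the subsequent store, which is the property the CAS loop needs; a
    // store-store barrier (dmb ishst) would let the earlier load sink
    // past the exclusive store.
    void Publish(std::atomic<int>& flag, int& data, const int& config) {
      int c = config;                                    // earlier load
      data = c + 1;                                      // earlier store
      std::atomic_thread_fence(std::memory_order_release);
      flag.store(1, std::memory_order_relaxed);          // like the STXR
    }
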
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 6d4275d..8de9700 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2146,10 +2146,7 @@
IntrinsicExceptions exceptions) {
intrinsic_ = intrinsic;
IntrinsicOptimizations opt(this);
- if (needs_env_or_cache == kNoEnvironmentOrCache) {
- opt.SetDoesNotNeedDexCache();
- opt.SetDoesNotNeedEnvironment();
- }
+
// Adjust method's side effects from intrinsic table.
switch (side_effects) {
case kNoSideEffects: SetSideEffects(SideEffects::None()); break;
@@ -2157,6 +2154,14 @@
case kWriteSideEffects: SetSideEffects(SideEffects::AllWrites()); break;
case kAllSideEffects: SetSideEffects(SideEffects::AllExceptGCDependency()); break;
}
+
+ if (needs_env_or_cache == kNoEnvironmentOrCache) {
+ opt.SetDoesNotNeedDexCache();
+ opt.SetDoesNotNeedEnvironment();
+ } else {
+ // If we need an environment, that means there will be a call, which can trigger GC.
+ SetSideEffects(GetSideEffects().Union(SideEffects::CanTriggerGC()));
+ }
// Adjust method's exception status from intrinsic table.
switch (exceptions) {
case kNoThrow: SetCanThrow(false); break;
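
A compressed illustration of why the kNoEnvironmentOrCache block moved below the side-effects switch: the switch assigns the flags wholesale, so the CanTriggerGC bit must be OR-ed in afterwards or it would be clobbered (hypothetical flag names; ART's SideEffects class is richer):

    #include <cstdint>

    struct Flags {
      uint32_t bits = 0;
      void Set(uint32_t b) { bits = b; }     // like SetSideEffects(...)
      void Union(uint32_t b) { bits |= b; }  // like Union(CanTriggerGC())
    };

    int main() {
      constexpr uint32_t kCanTriggerGC = 1u << 0;
      constexpr uint32_t kAllWrites = 1u << 1;
      Flags f;
      f.Union(kCanTriggerGC);  // old order: merged first...
      f.Set(kAllWrites);       // ...then overwritten by the switch
      // f.bits == kAllWrites here; the GC bit is lost. Hence the switch
      // runs first and the Union comes after.
      return 0;
    }
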
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c06d164..b65d0f5 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1868,6 +1868,10 @@
return false;
}
+ virtual bool IsActualObject() const {
+ return GetType() == Primitive::kPrimNot;
+ }
+
void SetReferenceTypeInfo(ReferenceTypeInfo rti);
ReferenceTypeInfo GetReferenceTypeInfo() const {
@@ -2487,8 +2491,10 @@
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize : public HTemplateInstruction<1> {
public:
+ // We set CanTriggerGC to prevent any intermediate address from being live
+ // at the point of the `HDeoptimize`.
HDeoptimize(HInstruction* cond, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None(), dex_pc) {
+ : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, cond);
}
@@ -4526,8 +4532,10 @@
class HNullCheck : public HExpression<1> {
public:
+ // `HNullCheck` can trigger GC, as it may call the `NullPointerException`
+ // constructor.
HNullCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(value->GetType(), SideEffects::None(), dex_pc) {
+ : HExpression(value->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -4848,8 +4856,10 @@
class HBoundsCheck : public HExpression<2> {
public:
+ // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
+ // constructor.
HBoundsCheck(HInstruction* index, HInstruction* length, uint32_t dex_pc)
- : HExpression(index->GetType(), SideEffects::None(), dex_pc) {
+ : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
DCHECK(index->GetType() == Primitive::kPrimInt);
SetRawInputAt(0, index);
SetRawInputAt(1, length);
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 18405f2..445cdab 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -107,6 +107,7 @@
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+ bool IsActualObject() const OVERRIDE { return false; }
HInstruction* GetBaseAddress() const { return InputAt(0); }
HInstruction* GetOffset() const { return InputAt(1); }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3eb7274..988e32b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -17,6 +17,7 @@
#include "optimizing_compiler.h"
#include <fstream>
+#include <memory>
#include <stdint.h>
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -52,6 +53,8 @@
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
+#include "dwarf/method_debug_info.h"
+#include "elf_writer_debug.h"
#include "elf_writer_quick.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
@@ -60,6 +63,7 @@
#include "inliner.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
+#include "jit/debugger_interface.h"
#include "jit/jit_code_cache.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
@@ -68,6 +72,7 @@
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "oat_quick_method_header.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
@@ -968,6 +973,39 @@
return false;
}
+ if (GetCompilerDriver()->GetCompilerOptions().GetGenerateDebugInfo()) {
+ const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+ const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+ CompiledMethod compiled_method(
+ GetCompilerDriver(),
+ codegen->GetInstructionSet(),
+ ArrayRef<const uint8_t>(code_allocator.GetMemory()),
+ codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
+ codegen->GetCoreSpillMask(),
+ codegen->GetFpuSpillMask(),
+ ArrayRef<const SrcMapElem>(),
+ ArrayRef<const uint8_t>(), // mapping_table.
+ ArrayRef<const uint8_t>(stack_map_data, stack_map_size),
+ ArrayRef<const uint8_t>(), // native_gc_map.
+ ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
+ ArrayRef<const LinkerPatch>());
+ dwarf::MethodDebugInfo method_debug_info {
+ dex_file,
+ class_def_idx,
+ method_idx,
+ access_flags,
+ code_item,
+ false, // deduped.
+ code_address,
+ code_address + code_allocator.GetSize(),
+ &compiled_method
+ };
+ ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForMethod(method_debug_info);
+ CreateJITCodeEntryForAddress(code_address,
+ std::unique_ptr<const uint8_t[]>(elf_file.data()),
+ elf_file.size());
+ }
+
return true;
}
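
CreateJITCodeEntryForAddress plugs the per-method ELF produced above into the GDB JIT interface. In outline, that interface is a linked list of in-memory symfiles plus a notification hook the debugger breakpoints; the entry layout, per GDB's documented ABI (sketched here, not ART's own type), is:

    #include <cstdint>

    // GDB sets a breakpoint on __jit_debug_register_code() and walks this
    // list to read each in-memory ELF symfile, which is how the per-method
    // debug ELF above becomes visible to a native debugger.
    struct JITCodeEntry {
      JITCodeEntry* next_entry;
      JITCodeEntry* prev_entry;
      const uint8_t* symfile_addr;
      uint64_t symfile_size;
    };
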
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index d399bc2..eb0419b 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1677,6 +1677,9 @@
LocationSummary* locations = safepoint_position->GetLocations();
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
+ DCHECK(interval->GetDefinedBy()->IsActualObject())
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
}
@@ -1689,6 +1692,9 @@
maximum_number_of_live_fp_registers_);
}
if (current->GetType() == Primitive::kPrimNot) {
+ DCHECK(interval->GetDefinedBy()->IsActualObject())
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
locations->SetRegisterBit(source.reg());
}
break;
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index b79c2f0..f96376d 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -501,6 +501,8 @@
virtual void cmp(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
+ // Note: CMN updates flags based on addition of its operands. Do not confuse
+ // the "N" suffix with bitwise inversion performed by MVN.
virtual void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) = 0;
virtual void orr(Register rd, Register rn, const ShifterOperand& so,
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index f341030..52023a6 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -3428,10 +3428,10 @@
CHECK(rn != IP);
// If rd != rn, use rd as temp. This allows 16-bit ADD/SUB in more situations than using IP.
Register temp = (rd != rn) ? rd : IP;
- if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~value, set_cc, &shifter_op)) {
+ if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~value, kCcKeep, &shifter_op)) {
mvn(temp, shifter_op, cond, kCcKeep);
add(rd, rn, ShifterOperand(temp), cond, set_cc);
- } else if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~(-value), set_cc, &shifter_op)) {
+ } else if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~(-value), kCcKeep, &shifter_op)) {
mvn(temp, shifter_op, cond, kCcKeep);
sub(rd, rn, ShifterOperand(temp), cond, set_cc);
} else if (High16Bits(-value) == 0) {
@@ -3449,22 +3449,32 @@
}
void Thumb2Assembler::CmpConstant(Register rn, int32_t value, Condition cond) {
- // We prefer to select the shorter code sequence rather than selecting add for
- // positive values and sub for negatives ones, which would slightly improve
- // the readability of generated code for some constants.
+ // We prefer to select the shorter code sequence rather than using plain cmp and cmn,
+ // which would slightly improve the readability of generated code for some constants.
ShifterOperand shifter_op;
if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, kCcSet, &shifter_op)) {
cmp(rn, shifter_op, cond);
- } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, ~value, kCcSet, &shifter_op)) {
+ } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, -value, kCcSet, &shifter_op)) {
cmn(rn, shifter_op, cond);
} else {
CHECK(rn != IP);
- movw(IP, Low16Bits(value), cond);
- uint16_t value_high = High16Bits(value);
- if (value_high != 0) {
- movt(IP, value_high, cond);
+ if (ShifterOperandCanHold(IP, kNoRegister, MVN, ~value, kCcKeep, &shifter_op)) {
+ mvn(IP, shifter_op, cond, kCcKeep);
+ cmp(rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperandCanHold(IP, kNoRegister, MVN, ~(-value), kCcKeep, &shifter_op)) {
+ mvn(IP, shifter_op, cond, kCcKeep);
+ cmn(rn, ShifterOperand(IP), cond);
+ } else if (High16Bits(-value) == 0) {
+ movw(IP, Low16Bits(-value), cond);
+ cmn(rn, ShifterOperand(IP), cond);
+ } else {
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ cmp(rn, ShifterOperand(IP), cond);
}
- cmp(rn, ShifterOperand(IP), cond);
}
}
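
A worked case, matching the expected dump added further below: neither 257 nor -257 is a Thumb2 modified immediate, but ~(-257) == 256 is, so the new second branch emits mvn ip, #256 (ip = -257) followed by cmn rn, ip, which sets the same flags as cmp rn, #257. The two's-complement identity it leans on, as a quick check:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t value = 257;
      // mvn ip, #256 materializes ip = ~256 = -257, so cmn rn, ip computes
      // rn + (-257), i.e. the same flags as cmp rn, #257.
      assert(static_cast<int32_t>(~INT32_C(256)) == -value);
      // Equivalently, ~(-v) == v - 1 for any v in two's complement.
      assert(~(-value) == value - 1);
      return 0;
    }
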
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 0ef0dc1..2df9b17 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -1626,6 +1626,76 @@
EmitAndCheck(&assembler, "AddConstant");
}
+TEST(Thumb2AssemblerTest, CmpConstant) {
+ arm::Thumb2Assembler assembler;
+
+ __ CmpConstant(R0, 0); // 16-bit CMP.
+ __ CmpConstant(R1, 1); // 16-bit CMP.
+ __ CmpConstant(R0, 7); // 16-bit CMP.
+ __ CmpConstant(R1, 8); // 16-bit CMP.
+ __ CmpConstant(R0, 255); // 16-bit CMP.
+ __ CmpConstant(R1, 256); // 32-bit CMP.
+ __ CmpConstant(R0, 257); // MVN+CMN.
+ __ CmpConstant(R1, 0xfff); // MOVW+CMP.
+ __ CmpConstant(R0, 0x1000); // 32-bit CMP.
+ __ CmpConstant(R1, 0x1001); // MVN+CMN.
+ __ CmpConstant(R0, 0x1002); // MOVW+CMP.
+ __ CmpConstant(R1, 0xffff); // MOVW+CMP.
+ __ CmpConstant(R0, 0x10000); // 32-bit CMP.
+ __ CmpConstant(R1, 0x10001); // 32-bit CMP.
+ __ CmpConstant(R0, 0x10002); // MVN+CMN.
+ __ CmpConstant(R1, 0x10003); // MOVW+MOVT+CMP.
+ __ CmpConstant(R0, -1); // 32-bit CMP.
+ __ CmpConstant(R1, -7); // CMN.
+ __ CmpConstant(R0, -8); // CMN.
+ __ CmpConstant(R1, -255); // CMN.
+ __ CmpConstant(R0, -256); // CMN.
+ __ CmpConstant(R1, -257); // MVN+CMP.
+ __ CmpConstant(R0, -0xfff); // MOVW+CMN.
+ __ CmpConstant(R1, -0x1000); // CMN.
+ __ CmpConstant(R0, -0x1001); // MVN+CMP.
+ __ CmpConstant(R1, -0x1002); // MOVW+CMN.
+ __ CmpConstant(R0, -0xffff); // MOVW+CMN.
+ __ CmpConstant(R1, -0x10000); // CMN.
+ __ CmpConstant(R0, -0x10001); // CMN.
+ __ CmpConstant(R1, -0x10002); // MVN+CMP.
+ __ CmpConstant(R0, -0x10003); // MOVW+MOVT+CMP.
+
+ __ CmpConstant(R8, 0); // 32-bit CMP.
+ __ CmpConstant(R9, 1); // 32-bit CMP.
+ __ CmpConstant(R8, 7); // 32-bit CMP.
+ __ CmpConstant(R9, 8); // 32-bit CMP.
+ __ CmpConstant(R8, 255); // 32-bit CMP.
+ __ CmpConstant(R9, 256); // 32-bit CMP.
+ __ CmpConstant(R8, 257); // MVN+CMN.
+ __ CmpConstant(R9, 0xfff); // MOVW+CMP.
+ __ CmpConstant(R8, 0x1000); // 32-bit CMP.
+ __ CmpConstant(R9, 0x1001); // MVN+CMN.
+ __ CmpConstant(R8, 0x1002); // MOVW+CMP.
+ __ CmpConstant(R9, 0xffff); // MOVW+CMP.
+ __ CmpConstant(R8, 0x10000); // 32-bit CMP.
+ __ CmpConstant(R9, 0x10001); // 32-bit CMP.
+ __ CmpConstant(R8, 0x10002); // MVN+CMN.
+ __ CmpConstant(R9, 0x10003); // MOVW+MOVT+CMP.
+ __ CmpConstant(R8, -1); // 32-bit CMP.
+ __ CmpConstant(R9, -7); // CMN.
+ __ CmpConstant(R8, -8); // CMN.
+ __ CmpConstant(R9, -255); // CMN.
+ __ CmpConstant(R8, -256); // CMN.
+ __ CmpConstant(R9, -257); // MVN+CMP.
+ __ CmpConstant(R8, -0xfff); // MOVW+CMN.
+ __ CmpConstant(R9, -0x1000); // CMN.
+ __ CmpConstant(R8, -0x1001); // MVN+CMP.
+ __ CmpConstant(R9, -0x1002); // MOVW+CMN.
+ __ CmpConstant(R8, -0xffff); // MOVW+CMN.
+ __ CmpConstant(R9, -0x10000); // CMN.
+ __ CmpConstant(R8, -0x10001); // CMN.
+ __ CmpConstant(R9, -0x10002); // MVN+CMP.
+ __ CmpConstant(R8, -0x10003); // MOVW+MOVT+CMP.
+
+ EmitAndCheck(&assembler, "CmpConstant");
+}
+
#undef __
} // namespace arm
} // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index f07f8c7..6736015 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -1,4 +1,4 @@
-const char* SimpleMovResults[] = {
+const char* const SimpleMovResults[] = {
" 0: 0008 movs r0, r1\n",
" 2: 4608 mov r0, r1\n",
" 4: 46c8 mov r8, r9\n",
@@ -6,18 +6,18 @@
" 8: f04f 0809 mov.w r8, #9\n",
nullptr
};
-const char* SimpleMov32Results[] = {
+const char* const SimpleMov32Results[] = {
" 0: ea4f 0001 mov.w r0, r1\n",
" 4: ea4f 0809 mov.w r8, r9\n",
nullptr
};
-const char* SimpleMovAddResults[] = {
+const char* const SimpleMovAddResults[] = {
" 0: 4608 mov r0, r1\n",
" 2: 1888 adds r0, r1, r2\n",
" 4: 1c08 adds r0, r1, #0\n",
nullptr
};
-const char* DataProcessingRegisterResults[] = {
+const char* const DataProcessingRegisterResults[] = {
" 0: ea6f 0001 mvn.w r0, r1\n",
" 4: eb01 0002 add.w r0, r1, r2\n",
" 8: eba1 0002 sub.w r0, r1, r2\n",
@@ -129,7 +129,7 @@
" 120: eb01 0c00 add.w ip, r1, r0\n",
nullptr
};
-const char* DataProcessingImmediateResults[] = {
+const char* const DataProcessingImmediateResults[] = {
" 0: 2055 movs r0, #85 ; 0x55\n",
" 2: f06f 0055 mvn.w r0, #85 ; 0x55\n",
" 6: f101 0055 add.w r0, r1, #85 ; 0x55\n",
@@ -154,7 +154,7 @@
" 48: 1f48 subs r0, r1, #5\n",
nullptr
};
-const char* DataProcessingModifiedImmediateResults[] = {
+const char* const DataProcessingModifiedImmediateResults[] = {
" 0: f04f 1055 mov.w r0, #5570645 ; 0x550055\n",
" 4: f06f 1055 mvn.w r0, #5570645 ; 0x550055\n",
" 8: f101 1055 add.w r0, r1, #5570645 ; 0x550055\n",
@@ -173,7 +173,7 @@
" 3c: f110 1f55 cmn.w r0, #5570645 ; 0x550055\n",
nullptr
};
-const char* DataProcessingModifiedImmediatesResults[] = {
+const char* const DataProcessingModifiedImmediatesResults[] = {
" 0: f04f 1055 mov.w r0, #5570645 ; 0x550055\n",
" 4: f04f 2055 mov.w r0, #1426085120 ; 0x55005500\n",
" 8: f04f 3055 mov.w r0, #1431655765 ; 0x55555555\n",
@@ -183,7 +183,7 @@
" 18: f44f 70d4 mov.w r0, #424 ; 0x1a8\n",
nullptr
};
-const char* DataProcessingShiftedRegisterResults[] = {
+const char* const DataProcessingShiftedRegisterResults[] = {
" 0: 0123 lsls r3, r4, #4\n",
" 2: 0963 lsrs r3, r4, #5\n",
" 4: 11a3 asrs r3, r4, #6\n",
@@ -201,7 +201,7 @@
" 32: ea5f 0834 movs.w r8, r4, rrx\n",
nullptr
};
-const char* ShiftImmediateResults[] = {
+const char* const ShiftImmediateResults[] = {
" 0: 0123 lsls r3, r4, #4\n",
" 2: 0963 lsrs r3, r4, #5\n",
" 4: 11a3 asrs r3, r4, #6\n",
@@ -219,7 +219,7 @@
" 32: ea5f 0834 movs.w r8, r4, rrx\n",
nullptr
};
-const char* BasicLoadResults[] = {
+const char* const BasicLoadResults[] = {
" 0: 69a3 ldr r3, [r4, #24]\n",
" 2: 7e23 ldrb r3, [r4, #24]\n",
" 4: 8b23 ldrh r3, [r4, #24]\n",
@@ -233,7 +233,7 @@
" 20: f9b4 8018 ldrsh.w r8, [r4, #24]\n",
nullptr
};
-const char* BasicStoreResults[] = {
+const char* const BasicStoreResults[] = {
" 0: 61a3 str r3, [r4, #24]\n",
" 2: 7623 strb r3, [r4, #24]\n",
" 4: 8323 strh r3, [r4, #24]\n",
@@ -243,7 +243,7 @@
" 10: f8a4 8018 strh.w r8, [r4, #24]\n",
nullptr
};
-const char* ComplexLoadResults[] = {
+const char* const ComplexLoadResults[] = {
" 0: 69a3 ldr r3, [r4, #24]\n",
" 2: f854 3f18 ldr.w r3, [r4, #24]!\n",
" 6: f854 3b18 ldr.w r3, [r4], #24\n",
@@ -276,7 +276,7 @@
" 6e: f934 3918 ldrsh.w r3, [r4], #-24\n",
nullptr
};
-const char* ComplexStoreResults[] = {
+const char* const ComplexStoreResults[] = {
" 0: 61a3 str r3, [r4, #24]\n",
" 2: f844 3f18 str.w r3, [r4, #24]!\n",
" 6: f844 3b18 str.w r3, [r4], #24\n",
@@ -297,7 +297,7 @@
" 3e: f824 3918 strh.w r3, [r4], #-24\n",
nullptr
};
-const char* NegativeLoadStoreResults[] = {
+const char* const NegativeLoadStoreResults[] = {
" 0: f854 3c18 ldr.w r3, [r4, #-24]\n",
" 4: f854 3d18 ldr.w r3, [r4, #-24]!\n",
" 8: f854 3918 ldr.w r3, [r4], #-24\n",
@@ -348,12 +348,12 @@
" bc: f824 3b18 strh.w r3, [r4], #24\n",
nullptr
};
-const char* SimpleLoadStoreDualResults[] = {
+const char* const SimpleLoadStoreDualResults[] = {
" 0: e9c0 2306 strd r2, r3, [r0, #24]\n",
" 4: e9d0 2306 ldrd r2, r3, [r0, #24]\n",
nullptr
};
-const char* ComplexLoadStoreDualResults[] = {
+const char* const ComplexLoadStoreDualResults[] = {
" 0: e9c0 2306 strd r2, r3, [r0, #24]\n",
" 4: e9e0 2306 strd r2, r3, [r0, #24]!\n",
" 8: e8e0 2306 strd r2, r3, [r0], #24\n",
@@ -368,7 +368,7 @@
" 2c: e870 2306 ldrd r2, r3, [r0], #-24\n",
nullptr
};
-const char* NegativeLoadStoreDualResults[] = {
+const char* const NegativeLoadStoreDualResults[] = {
" 0: e940 2306 strd r2, r3, [r0, #-24]\n",
" 4: e960 2306 strd r2, r3, [r0, #-24]!\n",
" 8: e860 2306 strd r2, r3, [r0], #-24\n",
@@ -383,7 +383,7 @@
" 2c: e8f0 2306 ldrd r2, r3, [r0], #24\n",
nullptr
};
-const char* SimpleBranchResults[] = {
+const char* const SimpleBranchResults[] = {
" 0: 2002 movs r0, #2\n",
" 2: 2101 movs r1, #1\n",
" 4: e7fd b.n 2 <SimpleBranch+0x2>\n",
@@ -403,7 +403,7 @@
" 20: 2006 movs r0, #6\n",
nullptr
};
-const char* LongBranchResults[] = {
+const char* const LongBranchResults[] = {
" 0: f04f 0002 mov.w r0, #2\n",
" 4: f04f 0101 mov.w r1, #1\n",
" 8: f7ff bffc b.w 4 <LongBranch+0x4>\n",
@@ -423,14 +423,14 @@
" 40: f04f 0006 mov.w r0, #6\n",
nullptr
};
-const char* LoadMultipleResults[] = {
+const char* const LoadMultipleResults[] = {
" 0: cc09 ldmia r4!, {r0, r3}\n",
" 2: e934 4800 ldmdb r4!, {fp, lr}\n",
" 6: e914 4800 ldmdb r4, {fp, lr}\n",
" a: f854 5b04 ldr.w r5, [r4], #4\n",
nullptr
};
-const char* StoreMultipleResults[] = {
+const char* const StoreMultipleResults[] = {
" 0: c409 stmia r4!, {r0, r3}\n",
" 2: e8a4 4800 stmia.w r4!, {fp, lr}\n",
" 6: e884 4800 stmia.w r4, {fp, lr}\n",
@@ -438,7 +438,7 @@
" e: f844 5d04 str.w r5, [r4, #-4]!\n",
nullptr
};
-const char* MovWMovTResults[] = {
+const char* const MovWMovTResults[] = {
" 0: f240 0400 movw r4, #0\n",
" 4: f240 0434 movw r4, #52 ; 0x34\n",
" 8: f240 0934 movw r9, #52 ; 0x34\n",
@@ -449,7 +449,7 @@
" 1c: f6cf 71ff movt r1, #65535 ; 0xffff\n",
nullptr
};
-const char* SpecialAddSubResults[] = {
+const char* const SpecialAddSubResults[] = {
" 0: aa14 add r2, sp, #80 ; 0x50\n",
" 2: b014 add sp, #80 ; 0x50\n",
" 4: f10d 0850 add.w r8, sp, #80 ; 0x50\n",
@@ -463,7 +463,7 @@
" 22: f6ad 7dfc subw sp, sp, #4092 ; 0xffc\n",
nullptr
};
-const char* LoadFromOffsetResults[] = {
+const char* const LoadFromOffsetResults[] = {
" 0: 68e2 ldr r2, [r4, #12]\n",
" 2: f8d4 2fff ldr.w r2, [r4, #4095] ; 0xfff\n",
" 6: f504 5280 add.w r2, r4, #4096 ; 0x1000\n",
@@ -514,7 +514,7 @@
" 9e: f9b4 200c ldrsh.w r2, [r4, #12]\n",
nullptr
};
-const char* StoreToOffsetResults[] = {
+const char* const StoreToOffsetResults[] = {
" 0: 60e2 str r2, [r4, #12]\n",
" 2: f8c4 2fff str.w r2, [r4, #4095] ; 0xfff\n",
" 6: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n",
@@ -563,7 +563,7 @@
" a4: 7322 strb r2, [r4, #12]\n",
nullptr
};
-const char* IfThenResults[] = {
+const char* const IfThenResults[] = {
" 0: bf08 it eq\n",
" 2: 2101 moveq r1, #1\n",
" 4: bf04 itt eq\n",
@@ -587,7 +587,7 @@
" 28: 2404 movne r4, #4\n",
nullptr
};
-const char* CbzCbnzResults[] = {
+const char* const CbzCbnzResults[] = {
" 0: b10a cbz r2, 6 <CbzCbnz+0x6>\n",
" 2: 2103 movs r1, #3\n",
" 4: 2203 movs r2, #3\n",
@@ -598,7 +598,7 @@
" 10: 2204 movs r2, #4\n",
nullptr
};
-const char* MultiplyResults[] = {
+const char* const MultiplyResults[] = {
" 0: 4348 muls r0, r1\n",
" 2: fb01 f002 mul.w r0, r1, r2\n",
" 6: fb09 f808 mul.w r8, r9, r8\n",
@@ -611,21 +611,21 @@
" 22: fbaa 890b umull r8, r9, sl, fp\n",
nullptr
};
-const char* DivideResults[] = {
+const char* const DivideResults[] = {
" 0: fb91 f0f2 sdiv r0, r1, r2\n",
" 4: fb99 f8fa sdiv r8, r9, sl\n",
" 8: fbb1 f0f2 udiv r0, r1, r2\n",
" c: fbb9 f8fa udiv r8, r9, sl\n",
nullptr
};
-const char* VMovResults[] = {
+const char* const VMovResults[] = {
" 0: eef7 0a00 vmov.f32 s1, #112 ; 0x70\n",
" 4: eeb7 1b00 vmov.f64 d1, #112 ; 0x70\n",
" 8: eef0 0a41 vmov.f32 s1, s2\n",
" c: eeb0 1b42 vmov.f64 d1, d2\n",
nullptr
};
-const char* BasicFloatingPointResults[] = {
+const char* const BasicFloatingPointResults[] = {
" 0: ee30 0a81 vadd.f32 s0, s1, s2\n",
" 4: ee30 0ac1 vsub.f32 s0, s1, s2\n",
" 8: ee20 0a81 vmul.f32 s0, s1, s2\n",
@@ -646,7 +646,7 @@
" 44: eeb1 0bc1 vsqrt.f64 d0, d1\n",
nullptr
};
-const char* FloatingPointConversionsResults[] = {
+const char* const FloatingPointConversionsResults[] = {
" 0: eeb7 1bc2 vcvt.f32.f64 s2, d2\n",
" 4: eeb7 2ac1 vcvt.f64.f32 d2, s2\n",
" 8: eefd 0ac1 vcvt.s32.f32 s1, s2\n",
@@ -659,35 +659,35 @@
" 24: eeb8 1b41 vcvt.f64.u32 d1, s2\n",
nullptr
};
-const char* FloatingPointComparisonsResults[] = {
+const char* const FloatingPointComparisonsResults[] = {
" 0: eeb4 0a60 vcmp.f32 s0, s1\n",
" 4: eeb4 0b41 vcmp.f64 d0, d1\n",
" 8: eeb5 1a40 vcmp.f32 s2, #0.0\n",
" c: eeb5 2b40 vcmp.f64 d2, #0.0\n",
nullptr
};
-const char* CallsResults[] = {
+const char* const CallsResults[] = {
" 0: 47f0 blx lr\n",
" 2: 4770 bx lr\n",
nullptr
};
-const char* BreakpointResults[] = {
+const char* const BreakpointResults[] = {
" 0: be00 bkpt 0x0000\n",
nullptr
};
-const char* StrR1Results[] = {
+const char* const StrR1Results[] = {
" 0: 9111 str r1, [sp, #68] ; 0x44\n",
" 2: f8cd 142c str.w r1, [sp, #1068] ; 0x42c\n",
nullptr
};
-const char* VPushPopResults[] = {
+const char* const VPushPopResults[] = {
" 0: ed2d 1a04 vpush {s2-s5}\n",
" 4: ed2d 2b08 vpush {d2-d5}\n",
" 8: ecbd 1a04 vpop {s2-s5}\n",
" c: ecbd 2b08 vpop {d2-d5}\n",
nullptr
};
-const char* Max16BitBranchResults[] = {
+const char* const Max16BitBranchResults[] = {
" 0: e3ff b.n 802 <Max16BitBranch+0x802>\n",
" 2: 2300 movs r3, #0\n",
" 4: 2302 movs r3, #2\n",
@@ -1716,7 +1716,7 @@
" 802: 4611 mov r1, r2\n",
nullptr
};
-const char* Branch32Results[] = {
+const char* const Branch32Results[] = {
" 0: f000 bc01 b.w 806 <Branch32+0x806>\n",
" 4: 2300 movs r3, #0\n",
" 6: 2302 movs r3, #2\n",
@@ -2746,7 +2746,7 @@
" 806: 4611 mov r1, r2\n",
nullptr
};
-const char* CompareAndBranchMaxResults[] = {
+const char* const CompareAndBranchMaxResults[] = {
" 0: b3fc cbz r4, 82 <CompareAndBranchMax+0x82>\n",
" 2: 2300 movs r3, #0\n",
" 4: 2302 movs r3, #2\n",
@@ -2815,7 +2815,7 @@
" 82: 4611 mov r1, r2\n",
nullptr
};
-const char* CompareAndBranchRelocation16Results[] = {
+const char* const CompareAndBranchRelocation16Results[] = {
" 0: 2c00 cmp r4, #0\n",
" 2: d040 beq.n 86 <CompareAndBranchRelocation16+0x86>\n",
" 4: 2300 movs r3, #0\n",
@@ -2886,7 +2886,7 @@
" 86: 4611 mov r1, r2\n",
nullptr
};
-const char* CompareAndBranchRelocation32Results[] = {
+const char* const CompareAndBranchRelocation32Results[] = {
" 0: 2c00 cmp r4, #0\n",
" 2: f000 8401 beq.w 808 <CompareAndBranchRelocation32+0x808>\n",
" 6: 2300 movs r3, #0\n",
@@ -3917,7 +3917,7 @@
" 808: 4611 mov r1, r2\n",
nullptr
};
-const char* MixedBranch32Results[] = {
+const char* const MixedBranch32Results[] = {
" 0: f000 bc03 b.w 80a <MixedBranch32+0x80a>\n",
" 4: 2300 movs r3, #0\n",
" 6: 2302 movs r3, #2\n",
@@ -4948,7 +4948,7 @@
" 80a: 4611 mov r1, r2\n",
nullptr
};
-const char* ShiftsResults[] = {
+const char* const ShiftsResults[] = {
" 0: 0148 lsls r0, r1, #5\n",
" 2: 0948 lsrs r0, r1, #5\n",
" 4: 1148 asrs r0, r1, #5\n",
@@ -4997,7 +4997,7 @@
" 98: fa51 f008 asrs.w r0, r1, r8\n",
nullptr
};
-const char* LoadStoreRegOffsetResults[] = {
+const char* const LoadStoreRegOffsetResults[] = {
" 0: 5888 ldr r0, [r1, r2]\n",
" 2: 5088 str r0, [r1, r2]\n",
" 4: f851 0012 ldr.w r0, [r1, r2, lsl #1]\n",
@@ -5012,7 +5012,7 @@
" 28: f841 0008 str.w r0, [r1, r8]\n",
nullptr
};
-const char* LoadStoreLiteralResults[] = {
+const char* const LoadStoreLiteralResults[] = {
" 0: 4801 ldr r0, [pc, #4] ; (8 <LoadStoreLiteral+0x8>)\n",
" 2: f8cf 0004 str.w r0, [pc, #4] ; 8 <LoadStoreLiteral+0x8>\n",
" 6: f85f 0008 ldr.w r0, [pc, #-8] ; 0 <LoadStoreLiteral>\n",
@@ -5023,7 +5023,7 @@
" 18: f8cf 07ff str.w r0, [pc, #2047] ; 81b <LoadStoreLiteral+0x81b>\n",
nullptr
};
-const char* LoadStoreLimitsResults[] = {
+const char* const LoadStoreLimitsResults[] = {
" 0: 6fe0 ldr r0, [r4, #124] ; 0x7c\n",
" 2: f8d4 0080 ldr.w r0, [r4, #128] ; 0x80\n",
" 6: 7fe0 ldrb r0, [r4, #31]\n",
@@ -5042,7 +5042,7 @@
" 30: f8a4 0040 strh.w r0, [r4, #64] ; 0x40\n",
nullptr
};
-const char* CompareAndBranchResults[] = {
+const char* const CompareAndBranchResults[] = {
" 0: b130 cbz r0, 10 <CompareAndBranch+0x10>\n",
" 2: f1bb 0f00 cmp.w fp, #0\n",
" 6: d003 beq.n 10 <CompareAndBranch+0x10>\n",
@@ -5052,7 +5052,7 @@
nullptr
};
-const char* AddConstantResults[] = {
+const char* const AddConstantResults[] = {
" 0: 4608 mov r0, r1\n",
" 2: 1c48 adds r0, r1, #1\n",
" 4: 1dc8 adds r0, r1, #7\n",
@@ -5370,6 +5370,104 @@
nullptr
};
+const char* const CmpConstantResults[] = {
+ " 0: 2800 cmp r0, #0\n",
+ " 2: 2901 cmp r1, #1\n",
+ " 4: 2807 cmp r0, #7\n",
+ " 6: 2908 cmp r1, #8\n",
+ " 8: 28ff cmp r0, #255 ; 0xff\n",
+ " a: f5b1 7f80 cmp.w r1, #256 ; 0x100\n",
+ " e: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 12: eb10 0f0c cmn.w r0, ip\n",
+ " 16: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 1a: 4561 cmp r1, ip\n",
+ " 1c: f5b0 5f80 cmp.w r0, #4096 ; 0x1000\n",
+ " 20: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 24: eb11 0f0c cmn.w r1, ip\n",
+ " 28: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 2c: 4560 cmp r0, ip\n",
+ " 2e: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 32: 4561 cmp r1, ip\n",
+ " 34: f5b0 3f80 cmp.w r0, #65536 ; 0x10000\n",
+ " 38: f1b1 1f01 cmp.w r1, #65537 ; 0x10001\n",
+ " 3c: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 40: eb10 0f0c cmn.w r0, ip\n",
+ " 44: f240 0c03 movw ip, #3\n",
+ " 48: f2c0 0c01 movt ip, #1\n",
+ " 4c: 4561 cmp r1, ip\n",
+ " 4e: f1b0 3fff cmp.w r0, #4294967295 ; 0xffffffff\n",
+ " 52: f111 0f07 cmn.w r1, #7\n",
+ " 56: f110 0f08 cmn.w r0, #8\n",
+ " 5a: f111 0fff cmn.w r1, #255 ; 0xff\n",
+ " 5e: f510 7f80 cmn.w r0, #256 ; 0x100\n",
+ " 62: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 66: 4561 cmp r1, ip\n",
+ " 68: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 6c: eb10 0f0c cmn.w r0, ip\n",
+ " 70: f511 5f80 cmn.w r1, #4096 ; 0x1000\n",
+ " 74: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 78: 4560 cmp r0, ip\n",
+ " 7a: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 7e: eb11 0f0c cmn.w r1, ip\n",
+ " 82: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 86: eb10 0f0c cmn.w r0, ip\n",
+ " 8a: f511 3f80 cmn.w r1, #65536 ; 0x10000\n",
+ " 8e: f110 1f01 cmn.w r0, #65537 ; 0x10001\n",
+ " 92: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 96: 4561 cmp r1, ip\n",
+ " 98: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
+ " 9c: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
+ " a0: 4560 cmp r0, ip\n",
+ " a2: f1b8 0f00 cmp.w r8, #0\n",
+ " a6: f1b9 0f01 cmp.w r9, #1\n",
+ " aa: f1b8 0f07 cmp.w r8, #7\n",
+ " ae: f1b9 0f08 cmp.w r9, #8\n",
+ " b2: f1b8 0fff cmp.w r8, #255 ; 0xff\n",
+ " b6: f5b9 7f80 cmp.w r9, #256 ; 0x100\n",
+ " ba: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " be: eb18 0f0c cmn.w r8, ip\n",
+ " c2: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " c6: 45e1 cmp r9, ip\n",
+ " c8: f5b8 5f80 cmp.w r8, #4096 ; 0x1000\n",
+ " cc: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " d0: eb19 0f0c cmn.w r9, ip\n",
+ " d4: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " d8: 45e0 cmp r8, ip\n",
+ " da: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " de: 45e1 cmp r9, ip\n",
+ " e0: f5b8 3f80 cmp.w r8, #65536 ; 0x10000\n",
+ " e4: f1b9 1f01 cmp.w r9, #65537 ; 0x10001\n",
+ " e8: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " ec: eb18 0f0c cmn.w r8, ip\n",
+ " f0: f240 0c03 movw ip, #3\n",
+ " f4: f2c0 0c01 movt ip, #1\n",
+ " f8: 45e1 cmp r9, ip\n",
+ " fa: f1b8 3fff cmp.w r8, #4294967295 ; 0xffffffff\n",
+ " fe: f119 0f07 cmn.w r9, #7\n",
+ " 102: f118 0f08 cmn.w r8, #8\n",
+ " 106: f119 0fff cmn.w r9, #255 ; 0xff\n",
+ " 10a: f518 7f80 cmn.w r8, #256 ; 0x100\n",
+ " 10e: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 112: 45e1 cmp r9, ip\n",
+ " 114: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 118: eb18 0f0c cmn.w r8, ip\n",
+ " 11c: f519 5f80 cmn.w r9, #4096 ; 0x1000\n",
+ " 120: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 124: 45e0 cmp r8, ip\n",
+ " 126: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 12a: eb19 0f0c cmn.w r9, ip\n",
+ " 12e: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 132: eb18 0f0c cmn.w r8, ip\n",
+ " 136: f519 3f80 cmn.w r9, #65536 ; 0x10000\n",
+ " 13a: f118 1f01 cmn.w r8, #65537 ; 0x10001\n",
+ " 13e: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 142: 45e1 cmp r9, ip\n",
+ " 144: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
+ " 148: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
+ " 14c: 45e0 cmp r8, ip\n",
+ nullptr
+};
+
std::map<std::string, const char* const*> test_results;
void setup_results() {
test_results["SimpleMov"] = SimpleMovResults;
@@ -5421,4 +5519,5 @@
test_results["LoadStoreLimits"] = LoadStoreLimitsResults;
test_results["CompareAndBranch"] = CompareAndBranchResults;
test_results["AddConstant"] = AddConstantResults;
+ test_results["CmpConstant"] = CmpConstantResults;
}
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 42ed881..244a5fe 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -18,6 +18,7 @@
#include <algorithm>
#include <numeric>
+#include <sys/mman.h>
#include "base/logging.h"
#include "base/macros.h"
@@ -44,23 +45,17 @@
}
}
-template <typename FreeByStartSet, typename FreeBySizeSet>
-static void RemoveChunk(FreeByStartSet* free_by_start,
- FreeBySizeSet* free_by_size,
- typename FreeBySizeSet::const_iterator free_by_size_pos) {
+void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) {
auto free_by_start_pos = free_by_size_pos->second;
- free_by_size->erase(free_by_size_pos);
- free_by_start->erase(free_by_start_pos);
+ free_by_size_.erase(free_by_size_pos);
+ free_by_start_.erase(free_by_start_pos);
}
-template <typename FreeByStartSet, typename FreeBySizeSet>
-static void InsertChunk(FreeByStartSet* free_by_start,
- FreeBySizeSet* free_by_size,
- const SpaceChunk& chunk) {
+inline void SwapSpace::InsertChunk(const SpaceChunk& chunk) {
DCHECK_NE(chunk.size, 0u);
- auto insert_result = free_by_start->insert(chunk);
+ auto insert_result = free_by_start_.insert(chunk);
DCHECK(insert_result.second);
- free_by_size->emplace(chunk.size, insert_result.first);
+ free_by_size_.emplace(chunk.size, insert_result.first);
}
SwapSpace::SwapSpace(int fd, size_t initial_size)
@@ -69,10 +64,18 @@
lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) {
// Assume that the file is unlinked.
- InsertChunk(&free_by_start_, &free_by_size_, NewFileChunk(initial_size));
+ InsertChunk(NewFileChunk(initial_size));
}
SwapSpace::~SwapSpace() {
+ // Unmap all mmapped chunks. Nothing should be allocated anymore at
+ // this point, so there should be only full-size chunks in free_by_start_.
+ for (const SpaceChunk& chunk : free_by_start_) {
+ if (munmap(chunk.ptr, chunk.size) != 0) {
+ PLOG(ERROR) << "Failed to unmap swap space chunk at "
+ << static_cast<const void*>(chunk.ptr) << " size=" << chunk.size;
+ }
+ }
// All arenas are backed by the same file. Just close the descriptor.
close(fd_);
}
@@ -113,7 +116,7 @@
: free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
if (it != free_by_size_.end()) {
old_chunk = *it->second;
- RemoveChunk(&free_by_start_, &free_by_size_, it);
+ RemoveChunk(it);
} else {
// Not a big enough free chunk, need to increase file size.
old_chunk = NewFileChunk(size);
@@ -124,13 +127,13 @@
if (old_chunk.size != size) {
// Insert the remainder.
SpaceChunk new_chunk = { old_chunk.ptr + size, old_chunk.size - size };
- InsertChunk(&free_by_start_, &free_by_size_, new_chunk);
+ InsertChunk(new_chunk);
}
return ret;
}
-SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
+SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMininumMapSize, kPageSize));
int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
@@ -159,7 +162,7 @@
}
// TODO: Full coalescing.
-void SwapSpace::Free(void* ptrV, size_t size) {
+void SwapSpace::Free(void* ptr, size_t size) {
MutexLock lock(Thread::Current(), lock_);
size = RoundUp(size, 8U);
@@ -168,7 +171,7 @@
free_before = CollectFree(free_by_start_, free_by_size_);
}
- SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptrV), size };
+ SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptr), size };
auto it = free_by_start_.lower_bound(chunk);
if (it != free_by_start_.begin()) {
auto prev = it;
@@ -180,7 +183,7 @@
chunk.ptr -= prev->size;
auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev });
DCHECK(erase_pos != free_by_size_.end());
- RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
+ RemoveChunk(erase_pos);
// "prev" is invalidated but "it" remains valid.
}
}
@@ -191,11 +194,11 @@
chunk.size += it->size;
auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it });
DCHECK(erase_pos != free_by_size_.end());
- RemoveChunk(&free_by_start_, &free_by_size_, erase_pos);
+ RemoveChunk(erase_pos);
// "it" is invalidated but we don't need it anymore.
}
}
- InsertChunk(&free_by_start_, &free_by_size_, chunk);
+ InsertChunk(chunk);
if (kCheckFreeMaps) {
size_t free_after = CollectFree(free_by_start_, free_by_size_);
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 9127b6b..b659f1d 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -19,42 +19,17 @@
#include <cstdlib>
#include <list>
+#include <vector>
#include <set>
#include <stdint.h>
#include <stddef.h>
-#include "base/debug_stack.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "mem_map.h"
namespace art {
-// Chunk of space.
-struct SpaceChunk {
- uint8_t* ptr;
- size_t size;
-
- uintptr_t Start() const {
- return reinterpret_cast<uintptr_t>(ptr);
- }
- uintptr_t End() const {
- return reinterpret_cast<uintptr_t>(ptr) + size;
- }
-};
-
-inline bool operator==(const SpaceChunk& lhs, const SpaceChunk& rhs) {
- return (lhs.size == rhs.size) && (lhs.ptr == rhs.ptr);
-}
-
-class SortChunkByPtr {
- public:
- bool operator()(const SpaceChunk& a, const SpaceChunk& b) const {
- return reinterpret_cast<uintptr_t>(a.ptr) < reinterpret_cast<uintptr_t>(b.ptr);
- }
-};
-
// An arena pool that creates arenas backed by an mmaped file.
class SwapSpace {
public:
@@ -68,17 +43,27 @@
}
private:
- SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
+ // Chunk of space.
+ struct SpaceChunk {
+ uint8_t* ptr;
+ size_t size;
- int fd_;
- size_t size_;
- std::list<SpaceChunk> maps_;
+ uintptr_t Start() const {
+ return reinterpret_cast<uintptr_t>(ptr);
+ }
+ uintptr_t End() const {
+ return reinterpret_cast<uintptr_t>(ptr) + size;
+ }
+ };
- // NOTE: Boost.Bimap would be useful for the two following members.
+ class SortChunkByPtr {
+ public:
+ bool operator()(const SpaceChunk& a, const SpaceChunk& b) const {
+ return reinterpret_cast<uintptr_t>(a.ptr) < reinterpret_cast<uintptr_t>(b.ptr);
+ }
+ };
- // Map start of a free chunk to its size.
typedef std::set<SpaceChunk, SortChunkByPtr> FreeByStartSet;
- FreeByStartSet free_by_start_ GUARDED_BY(lock_);
// Map size to an iterator to free_by_start_'s entry.
typedef std::pair<size_t, FreeByStartSet::const_iterator> FreeBySizeEntry;
@@ -92,6 +77,21 @@
}
};
typedef std::set<FreeBySizeEntry, FreeBySizeComparator> FreeBySizeSet;
+
+ SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_);
+
+ void RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) REQUIRES(lock_);
+ void InsertChunk(const SpaceChunk& chunk) REQUIRES(lock_);
+
+ int fd_;
+ size_t size_;
+ std::list<SpaceChunk> maps_;
+
+ // NOTE: Boost.Bimap would be useful for the two following members.
+
+ // Map start of a free chunk to its size.
+ FreeByStartSet free_by_start_ GUARDED_BY(lock_);
+ // Free chunks ordered by size.
FreeBySizeSet free_by_size_ GUARDED_BY(lock_);
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -126,6 +126,9 @@
template <typename U>
friend class SwapAllocator;
+
+ template <typename U>
+ friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs);
};
template <typename T>
@@ -201,9 +204,22 @@
template <typename U>
friend class SwapAllocator;
+
+ template <typename U>
+ friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs);
};
template <typename T>
+inline bool operator==(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) {
+ return lhs.swap_space_ == rhs.swap_space_;
+}
+
+template <typename T>
+inline bool operator!=(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename T>
using SwapVector = std::vector<T, SwapAllocator<T>>;
template <typename T, typename Comparator>
using SwapSet = std::set<T, Comparator, SwapAllocator<T>>;
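
The reshuffled header keeps SwapSpace's two-index design: free chunks are indexed by start address for coalescing in Free() and by (size, position) for best-fit lookup in Alloc(). A simplified sketch of that shape (the real class additionally grows the backing file and guards everything with lock_):

    #include <cstddef>
    #include <cstdint>
    #include <set>
    #include <utility>

    // Two views over the same free chunks: by address for merging
    // byte-adjacent neighbors, by (size, position) so Alloc() can do a
    // best-fit lower_bound on the requested size.
    struct Chunk {
      uint8_t* ptr;
      size_t size;
    };
    struct ByPtr {
      bool operator()(const Chunk& a, const Chunk& b) const {
        return a.ptr < b.ptr;
      }
    };
    using FreeByStart = std::set<Chunk, ByPtr>;
    using SizeEntry = std::pair<size_t, FreeByStart::const_iterator>;
    struct BySize {
      bool operator()(const SizeEntry& a, const SizeEntry& b) const {
        if (a.first != b.first) return a.first < b.first;
        return a.second->ptr < b.second->ptr;  // tie-break on chunk start
      }
    };
    using FreeBySize = std::set<SizeEntry, BySize>;
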
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 32a237a..808643a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -313,13 +313,12 @@
UsageError(" -g");
UsageError(" --generate-debug-info: Generate debug information for native debugging,");
UsageError(" such as stack unwinding information, ELF symbols and DWARF sections.");
- UsageError(" This generates all the available information. Unneeded parts can be");
- UsageError(" stripped using standard command line tools such as strip or objcopy.");
- UsageError(" (enabled by default in debug builds, disabled by default otherwise)");
+ UsageError(" If used without --native-debuggable, it will be best-effort only.");
+ UsageError(" This option does not affect the generated code. (disabled by default)");
UsageError("");
UsageError(" --no-generate-debug-info: Do not generate debug information for native debugging.");
UsageError("");
- UsageError(" --debuggable: Produce code debuggable with Java debugger. Implies -g.");
+ UsageError(" --debuggable: Produce code debuggable with Java debugger.");
UsageError("");
UsageError(" --native-debuggable: Produce code debuggable with native debugger (like LLDB).");
UsageError(" Implies --debuggable.");
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index ee7b21c..f922687 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -56,7 +56,7 @@
// R-type instructions.
{ kRTypeMask, 0, "sll", "DTA", },
// 0, 1, movci
- { kRTypeMask, 2, "srl", "DTA", },
+ { kRTypeMask | (0x1f << 21), 2, "srl", "DTA", },
{ kRTypeMask, 3, "sra", "DTA", },
{ kRTypeMask | (0x1f << 6), 4, "sllv", "DTS", },
{ kRTypeMask | (0x1f << 6), 6, "srlv", "DTS", },
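
Widening the srl mask to include bits 25..21 keeps the entry from also matching MIPS32r2's rotr, which reuses SPECIAL function 0x02 but sets bit 21. A decode sketch (the kRTypeMask value here is assumed to be the opcode-plus-function mask):

    #include <cstdint>

    constexpr uint32_t kRTypeMask = 0xfc00003fu;  // opcode | function (assumed)
    constexpr uint32_t kSrlMask = kRTypeMask | (0x1fu << 21);
    constexpr uint32_t kSrlBits = 2u;              // function 0x02, rs field 0
    constexpr uint32_t kRotrBits = (1u << 21) | 2u;  // same function, bit 21 set
    static_assert((kSrlBits & kSrlMask) == kSrlBits, "plain srl still matches");
    static_assert((kRotrBits & kSrlMask) != kSrlBits,
                  "rotr no longer decodes as srl");
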
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 14e5ec9..04645d1 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -250,6 +250,8 @@
thread_android.cc
LIBART_TARGET_SRC_FILES_arm := \
+ interpreter/mterp/mterp.cc \
+ interpreter/mterp/out/mterp_arm.S \
arch/arm/context_arm.cc.arm \
arch/arm/entrypoints_init_arm.cc \
arch/arm/instruction_set_features_assembly_tests.S \
@@ -261,6 +263,7 @@
arch/arm/fault_handler_arm.cc
LIBART_TARGET_SRC_FILES_arm64 := \
+ interpreter/mterp/mterp_stub.cc \
arch/arm64/context_arm64.cc \
arch/arm64/entrypoints_init_arm64.cc \
arch/arm64/jni_entrypoints_arm64.S \
@@ -271,6 +274,7 @@
arch/arm64/fault_handler_arm64.cc
LIBART_SRC_FILES_x86 := \
+ interpreter/mterp/mterp_stub.cc \
arch/x86/context_x86.cc \
arch/x86/entrypoints_init_x86.cc \
arch/x86/jni_entrypoints_x86.S \
@@ -285,6 +289,7 @@
# Note that the fault_handler_x86.cc is not a mistake. This file is
# shared between the x86 and x86_64 architectures.
LIBART_SRC_FILES_x86_64 := \
+ interpreter/mterp/mterp_stub.cc \
arch/x86_64/context_x86_64.cc \
arch/x86_64/entrypoints_init_x86_64.cc \
arch/x86_64/jni_entrypoints_x86_64.S \
@@ -298,6 +303,7 @@
$(LIBART_SRC_FILES_x86_64) \
LIBART_TARGET_SRC_FILES_mips := \
+ interpreter/mterp/mterp_stub.cc \
arch/mips/context_mips.cc \
arch/mips/entrypoints_init_mips.cc \
arch/mips/jni_entrypoints_mips.S \
@@ -307,6 +313,7 @@
arch/mips/fault_handler_mips.cc
LIBART_TARGET_SRC_FILES_mips64 := \
+ interpreter/mterp/mterp_stub.cc \
arch/mips64/context_mips64.cc \
arch/mips64/entrypoints_init_mips64.cc \
arch/mips64/jni_entrypoints_mips64.S \
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 2cb2212..7170f73 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -378,8 +378,8 @@
"memory"); // clobber.
#elif defined(__mips__) && defined(__LP64__)
__asm__ __volatile__ (
- // Spill a0-a7 which we say we don't clobber. May contain args.
- "daddiu $sp, $sp, -64\n\t"
+ // Spill a0-a7 and t0-t3, which we say we don't clobber. May contain args.
+ "daddiu $sp, $sp, -96\n\t"
"sd $a0, 0($sp)\n\t"
"sd $a1, 8($sp)\n\t"
"sd $a2, 16($sp)\n\t"
@@ -388,6 +388,10 @@
"sd $a5, 40($sp)\n\t"
"sd $a6, 48($sp)\n\t"
"sd $a7, 56($sp)\n\t"
+ "sd $t0, 64($sp)\n\t"
+ "sd $t1, 72($sp)\n\t"
+ "sd $t2, 80($sp)\n\t"
+ "sd $t3, 88($sp)\n\t"
"daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
"sd %[referrer], 0($sp)\n\t"
@@ -423,13 +427,17 @@
"ld $a5, 40($sp)\n\t"
"ld $a6, 48($sp)\n\t"
"ld $a7, 56($sp)\n\t"
- "daddiu $sp, $sp, 64\n\t"
+ "ld $t0, 64($sp)\n\t"
+ "ld $t1, 72($sp)\n\t"
+ "ld $t2, 80($sp)\n\t"
+ "ld $t3, 88($sp)\n\t"
+ "daddiu $sp, $sp, 96\n\t"
"move %[result], $v0\n\t" // Store the call result.
: [result] "=r" (result)
: [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
[referrer] "r"(referrer), [hidden] "r"(hidden)
- : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9", "k0", "k1", "fp", "ra",
"$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
"$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 2b4826e..31610a3 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -133,8 +133,20 @@
#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.mterp_current_ibase.
+#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
+ art::Thread::MterpCurrentIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.mterp_default_ibase.
+#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 4 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
+ art::Thread::MterpDefaultIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
+#define THREAD_ALT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 5 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
+ art::Thread::MterpAltIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.rosalloc_runs.
-#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 6 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
@@ -146,6 +158,40 @@
ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offsets within ShadowFrame.
+#define SHADOWFRAME_LINK_OFFSET 0
+ADD_TEST_EQ(SHADOWFRAME_LINK_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::LinkOffset()))
+#define SHADOWFRAME_METHOD_OFFSET (SHADOWFRAME_LINK_OFFSET + 1 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_METHOD_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::MethodOffset()))
+#define SHADOWFRAME_RESULT_REGISTER_OFFSET (SHADOWFRAME_LINK_OFFSET + 2 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_RESULT_REGISTER_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::ResultRegisterOffset()))
+#define SHADOWFRAME_DEX_PC_PTR_OFFSET (SHADOWFRAME_LINK_OFFSET + 3 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_DEX_PC_PTR_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::DexPCPtrOffset()))
+#define SHADOWFRAME_CODE_ITEM_OFFSET (SHADOWFRAME_LINK_OFFSET + 4 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_CODE_ITEM_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::CodeItemOffset()))
+#define SHADOWFRAME_LOCK_COUNT_DATA_OFFSET (SHADOWFRAME_LINK_OFFSET + 5 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_LOCK_COUNT_DATA_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::LockCountDataOffset()))
+#define SHADOWFRAME_NUMBER_OF_VREGS_OFFSET (SHADOWFRAME_LINK_OFFSET + 6 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::NumberOfVRegsOffset()))
+#define SHADOWFRAME_DEX_PC_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 4)
+ADD_TEST_EQ(SHADOWFRAME_DEX_PC_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::DexPCOffset()))
+#define SHADOWFRAME_VREGS_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 8)
+ADD_TEST_EQ(SHADOWFRAME_VREGS_OFFSET,
+ static_cast<int32_t>(art::ShadowFrame::VRegsOffset()))
+
+// Offsets within CodeItem
+#define CODEITEM_INSNS_OFFSET 16
+ADD_TEST_EQ(CODEITEM_INSNS_OFFSET,
+ static_cast<int32_t>(OFFSETOF_MEMBER(art::DexFile::CodeItem, insns_)))
+
// Offsets within java.lang.Object.
#define MIRROR_OBJECT_CLASS_OFFSET 0
ADD_TEST_EQ(MIRROR_OBJECT_CLASS_OFFSET, art::mirror::Object::ClassOffset().Int32Value())
@@ -188,6 +234,26 @@
ADD_TEST_EQ(MIRROR_CHAR_ARRAY_DATA_OFFSET,
art::mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value())
+#define MIRROR_BOOLEAN_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_BOOLEAN_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint8_t)).Int32Value())
+
+#define MIRROR_BYTE_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_BYTE_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int8_t)).Int32Value())
+
+#define MIRROR_SHORT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_SHORT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int16_t)).Int32Value())
+
+#define MIRROR_INT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
+ADD_TEST_EQ(MIRROR_INT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value())
+
+#define MIRROR_WIDE_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_WIDE_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
+
#define MIRROR_OBJECT_ARRAY_DATA_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
art::mirror::Array::DataOffset(
@@ -299,6 +365,12 @@
// Assert this so that we can avoid zeroing the next field by installing the class pointer.
ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET)
+#define THREAD_SUSPEND_REQUEST 1
+ADD_TEST_EQ(THREAD_SUSPEND_REQUEST, static_cast<int32_t>(art::kSuspendRequest))
+
+#define THREAD_CHECKPOINT_REQUEST 2
+ADD_TEST_EQ(THREAD_CHECKPOINT_REQUEST, static_cast<int32_t>(art::kCheckpointRequest))
+
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
#endif
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 969f5b9..cea7046 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -158,7 +158,7 @@
template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingMultiMap = std::multimap<
- Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>>;
+ Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;
template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 263f50d..f674a6f 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -75,6 +75,7 @@
kReferenceQueueWeakReferencesLock,
kReferenceQueueClearedReferencesLock,
kReferenceProcessorLock,
+ kJitDebugInterfaceLock,
kJitCodeCacheLock,
kAllocSpaceLock,
kBumpPointerSpaceBlockLock,
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 624abb9..a4e16ae 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -429,7 +429,7 @@
CHECK_EQ(0, dlclose(handle));
}
{
- void* handle = dlopen("libopenjdk.so", RTLD_LAZY);
+ void* handle = dlopen("libopenjdkd.so", RTLD_LAZY);
dlclose(handle);
CHECK_EQ(0, dlclose(handle));
}
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 57d623e..52da28b 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -27,7 +27,6 @@
#include "base/unix_file/fd_file.h"
#include "elf_file_impl.h"
#include "elf_utils.h"
-#include "jit/debugger_interface.h"
#include "leb128.h"
#include "utils.h"
@@ -53,8 +52,6 @@
hash_section_start_(nullptr),
symtab_symbol_table_(nullptr),
dynsym_symbol_table_(nullptr),
- jit_elf_image_(nullptr),
- jit_gdb_entry_(nullptr),
requested_base_(requested_base) {
CHECK(file != nullptr);
}
@@ -273,10 +270,6 @@
STLDeleteElements(&segments_);
delete symtab_symbol_table_;
delete dynsym_symbol_table_;
- delete jit_elf_image_;
- if (jit_gdb_entry_) {
- DeleteJITCodeEntry(jit_gdb_entry_);
- }
}
template <typename ElfTypes>
@@ -1300,11 +1293,6 @@
return false;
}
- // Use GDB JIT support to do stack backtrace, etc.
- if (executable) {
- GdbJITSupport();
- }
-
return true;
}
@@ -1395,50 +1383,6 @@
}
template <typename ElfTypes>
-void ElfFileImpl<ElfTypes>::GdbJITSupport() {
- // We only get here if we only are mapping the program header.
- DCHECK(program_header_only_);
-
- // Well, we need the whole file to do this.
- std::string error_msg;
- // Make it MAP_PRIVATE so we can just give it to gdb if all the necessary
- // sections are there.
- std::unique_ptr<ElfFileImpl<ElfTypes>> all_ptr(
- Open(const_cast<File*>(file_), PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
- if (all_ptr.get() == nullptr) {
- return;
- }
- ElfFileImpl<ElfTypes>& all = *all_ptr;
-
- // We need the eh_frame for gdb but debug info might be present without it.
- const Elf_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
- if (eh_frame == nullptr) {
- return;
- }
-
- // Do we have interesting sections?
- // We need to add in a strtab and symtab to the image.
- // all is MAP_PRIVATE so it can be written to freely.
- // We also already have strtab and symtab so we are fine there.
- Elf_Ehdr& elf_hdr = all.GetHeader();
- elf_hdr.e_entry = 0;
- elf_hdr.e_phoff = 0;
- elf_hdr.e_phnum = 0;
- elf_hdr.e_phentsize = 0;
- elf_hdr.e_type = ET_EXEC;
-
- // Since base_address_ is 0 if we are actually loaded at a known address (i.e. this is boot.oat)
- // and the actual address stuff starts at in regular files this is good.
- if (!all.FixupDebugSections(reinterpret_cast<intptr_t>(base_address_))) {
- LOG(ERROR) << "Failed to load GDB data";
- return;
- }
-
- jit_gdb_entry_ = CreateJITCodeEntry(all.Begin(), all.Size());
- gdb_file_mapping_.reset(all_ptr.release());
-}
-
-template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) {
// ELF files produced by MCLinker look roughly like this
//
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 0f466bd..2af31dc 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -213,12 +213,6 @@
SymbolTable* symtab_symbol_table_;
SymbolTable* dynsym_symbol_table_;
- // Support for GDB JIT
- uint8_t* jit_elf_image_;
- JITCodeEntry* jit_gdb_entry_;
- std::unique_ptr<ElfFileImpl<ElfTypes>> gdb_file_mapping_;
- void GdbJITSupport();
-
// Override the 'base' p_vaddr in the first LOAD segment with this value (if non-null).
uint8_t* requested_base_;
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index dc9f14c..f87d48d 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -122,7 +122,10 @@
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, rosalloc_runs, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
sizeof(void*) * kNumRosAllocThreadLocalSizeBrackets);
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 726cf1b..5e0a11d 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -417,6 +417,13 @@
!Locks::classlinker_classes_lock_);
void UpdateInterpreterHandlerTable() REQUIRES(Locks::mutator_lock_) {
+ /*
+ * TUNING: Dalvik's mterp stashes the actual current handler table base in a
+ * tls field. For Arm, this enables all suspend, debug & tracing checks to be
+ * collapsed into a single conditionally-executed ldw instruction.
+ * Move to Dalvik-style handler-table management for both the goto interpreter and
+ * mterp.
+ */
interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 8d5a61a..e7b4731 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -25,6 +25,7 @@
#include "ScopedLocalRef.h"
#include "stack.h"
#include "unstarted_runtime.h"
+#include "mterp/mterp.h"
namespace art {
namespace interpreter {
@@ -224,19 +225,33 @@
}
enum InterpreterImplKind {
- kSwitchImpl, // Switch-based interpreter implementation.
- kComputedGotoImplKind // Computed-goto-based interpreter implementation.
+ kSwitchImplKind, // Switch-based interpreter implementation.
+ kComputedGotoImplKind, // Computed-goto-based interpreter implementation.
+ kMterpImplKind // Assembly interpreter
};
static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
- os << ((rhs == kSwitchImpl) ? "Switch-based interpreter" : "Computed-goto-based interpreter");
+ os << ((rhs == kSwitchImplKind)
+ ? "Switch-based interpreter"
+ : (rhs == kComputedGotoImplKind)
+ ? "Computed-goto-based interpreter"
+ : "Asm interpreter");
return os;
}
#if !defined(__clang__)
+#if defined(__arm__) && !defined(ART_USE_READ_BARRIER)
+// TODO: remove when all targets implemented.
+static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
+#else
static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
+#endif
#else
// Clang 3.4 fails to build the goto interpreter implementation.
-static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
+#if defined(__arm__) && !defined(ART_USE_READ_BARRIER)
+static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
+#else
+static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind;
+#endif
template<bool do_access_check, bool transaction_active>
JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
LOG(FATAL) << "UNREACHABLE";
@@ -263,18 +278,52 @@
static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register) {
- DCHECK(shadow_frame.GetMethod()->IsInvokable());
+ DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
bool transaction_active = Runtime::Current()->IsActiveTransaction();
if (LIKELY(shadow_frame.GetMethod()->IsPreverified())) {
// Enter the "without access check" interpreter.
- if (kInterpreterImplKind == kSwitchImpl) {
+ if (kInterpreterImplKind == kMterpImplKind) {
if (transaction_active) {
- return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register);
+ // No Mterp variant - just use the switch interpreter.
+ return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register,
+ false);
} else {
- return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register);
+ const instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ while (true) {
+ if (instrumentation->IsActive() || !Runtime::Current()->IsStarted()) {
+ // TODO: allow JIT profiling instrumentation. Now, just punt on all instrumentation.
+#if !defined(__clang__)
+ return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register);
+#else
+ return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
+ false);
+#endif
+ }
+ bool returned = ExecuteMterpImpl(self, code_item, &shadow_frame, &result_register);
+ if (returned) {
+ return result_register;
+ } else {
+ // Mterp didn't like that instruction. Single-step it with the reference interpreter.
+ JValue res = ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame,
+ result_register, true);
+ if (shadow_frame.GetDexPC() == DexFile::kDexNoIndex) {
+ // Single-stepped a return or an exception not handled locally. Return to caller.
+ return res;
+ }
+ }
+ }
+ }
+ } else if (kInterpreterImplKind == kSwitchImplKind) {
+ if (transaction_active) {
+ return ExecuteSwitchImpl<false, true>(self, code_item, shadow_frame, result_register,
+ false);
+ } else {
+ return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
+ false);
}
} else {
DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
@@ -286,11 +335,22 @@
}
} else {
// Enter the "with access check" interpreter.
- if (kInterpreterImplKind == kSwitchImpl) {
+ if (kInterpreterImplKind == kMterpImplKind) {
+ // No access check variants for Mterp. Just use the switch version.
if (transaction_active) {
- return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register);
+ return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register,
+ false);
} else {
- return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register);
+ return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register,
+ false);
+ }
+ } else if (kInterpreterImplKind == kSwitchImplKind) {
+ if (transaction_active) {
+ return ExecuteSwitchImpl<true, true>(self, code_item, shadow_frame, result_register,
+ false);
+ } else {
+ return ExecuteSwitchImpl<true, false>(self, code_item, shadow_frame, result_register,
+ false);
}
} else {
DCHECK_EQ(kInterpreterImplKind, kComputedGotoImplKind);
@@ -501,5 +561,13 @@
self->PopShadowFrame();
}
+void CheckInterpreterAsmConstants() {
+ CheckMterpAsmConstants();
+}
+
+void InitInterpreterTls(Thread* self) {
+ InitMterpTls(self);
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 8e7f3da..6353a9b 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -50,6 +50,11 @@
ShadowFrame* shadow_frame, JValue* result)
SHARED_REQUIRES(Locks::mutator_lock_);
+// One-time sanity check.
+void CheckInterpreterAsmConstants();
+
+void InitInterpreterTls(Thread* self);
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 9f6699f..932d255 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -67,16 +67,21 @@
namespace art {
namespace interpreter {
-// External references to both interpreter implementations.
+// External references to all interpreter implementations.
template<bool do_access_check, bool transaction_active>
extern JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template<bool do_access_check, bool transaction_active>
extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
+// Mterp does not support transactions or access checks, thus no templated versions.
+extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result_register);
+
void ThrowNullPointerExceptionFromInterpreter()
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index c9831e6..bab0d40 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -35,6 +35,9 @@
/* Structured locking is to be enforced for abnormal termination, too. */ \
shadow_frame.GetLockCountData(). \
CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
+ if (interpret_one_instruction) { \
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex); \
+ } \
return JValue(); /* Handled in caller. */ \
} else { \
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); \
@@ -78,7 +81,8 @@
template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction) {
constexpr bool do_assignability_check = do_access_check;
if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
LOG(FATAL) << "Invalid shadow frame for interpreter use";
@@ -105,7 +109,7 @@
// to keep this live for the scope of the entire function call.
std::unique_ptr<lambda::ClosureBuilder> lambda_closure_builder;
size_t lambda_captured_variable_index = 0;
- while (true) {
+ do {
dex_pc = inst->GetDexPc(insns);
shadow_frame.SetDexPC(dex_pc);
TraceExecution(shadow_frame, inst, dex_pc);
@@ -203,6 +207,9 @@
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN_VOID: {
@@ -216,6 +223,9 @@
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN: {
@@ -230,6 +240,9 @@
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN_WIDE: {
@@ -243,6 +256,9 @@
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::RETURN_OBJECT: {
@@ -278,6 +294,9 @@
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
}
+ if (interpret_one_instruction) {
+ shadow_frame.SetDexPC(DexFile::kDexNoIndex);
+ }
return result;
}
case Instruction::CONST_4: {
@@ -2370,22 +2389,29 @@
case Instruction::UNUSED_7A:
UnexpectedOpcode(inst, shadow_frame);
}
- }
+ } while (!interpret_one_instruction);
+ // Record where we stopped.
+ shadow_frame.SetDexPC(inst->GetDexPc(insns));
+ return JValue();
} // NOLINT(readability/fn_size)
// Explicit definitions of ExecuteSwitchImpl.
template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
template SHARED_REQUIRES(Locks::mutator_lock_)
JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
+ ShadowFrame& shadow_frame, JValue result_register,
+ bool interpret_one_instruction);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/mterp/Makefile_mterp b/runtime/interpreter/mterp/Makefile_mterp
new file mode 100644
index 0000000..f0c30ad
--- /dev/null
+++ b/runtime/interpreter/mterp/Makefile_mterp
@@ -0,0 +1,49 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Makefile for the Art fast interpreter. This is not currently
+# integrated into the build system.
+#
+
+SHELL := /bin/sh
+
+# The build system has TARGET_ARCH=arm, but we can support the exact architecture
+# if it is worthwhile.
+#
+# To generate sources:
+# for arch in arm arm64 x86 x86_64 mips mips64
+# do
+#    TARGET_ARCH_EXT=$arch make -f Makefile_mterp
+# done
+#
+
+OUTPUT_DIR := out
+
+# Accumulate all possible dependencies for the generated files in a very
+# conservative fashion. If it's not one of the generated files in "out",
+# assume it's a dependency.
+SOURCE_DEPS := \
+ $(shell find . -path ./$(OUTPUT_DIR) -prune -o -type f -print) \
+
+# Source files generated by the script. Currently there is one assembly
+# file per target, though in practice it could be empty.
+GEN_SOURCES := \
+ $(OUTPUT_DIR)/interp_asm_$(TARGET_ARCH_EXT).S
+
+target: $(GEN_SOURCES)
+
+$(GEN_SOURCES): $(SOURCE_DEPS)
+	@mkdir -p $(OUTPUT_DIR)
+ ./gen_mterp.py $(TARGET_ARCH_EXT) $(OUTPUT_DIR)
diff --git a/runtime/interpreter/mterp/README.txt b/runtime/interpreter/mterp/README.txt
new file mode 100644
index 0000000..19e02be
--- /dev/null
+++ b/runtime/interpreter/mterp/README.txt
@@ -0,0 +1,197 @@
+rt "mterp" README
+
+NOTE: Find rebuilding instructions at the bottom of this file.
+
+
+==== Overview ====
+
+Every configuration has a "config-*" file that controls how the sources
+are generated. The sources are written into the "out" directory, where
+they are picked up by the Android build system.
+
+The best way to become familiar with the interpreter is to look at the
+generated files in the "out" directory.
+
+
+==== Config file format ====
+
+The config files are parsed from top to bottom. Each line in the file
+may be blank, hold a comment (line starts with '#'), or be a command.
+
+The commands are:
+
+ handler-style <computed-goto|jump-table>
+
+ Specify which style of interpreter to generate. In computed-goto,
+ each handler is allocated a fixed region, allowing transitions to
+ be done via table-start-address + (opcode * handler-size). With
+ jump-table style, handlers may be of any length, and the generated
+ table is an array of pointers to the handlers. This command is required,
+ and must be the first command in the config file.
+
+ handler-size <bytes>
+
+ Specify the size of the fixed region, in bytes. On most platforms
+ this will need to be a power of 2. For jump-table implementations,
+ this command is ignored.
+
+ import <filename>
+
+ The specified file is included immediately, in its entirety. No
+ substitutions are performed. ".cpp" and ".h" files are copied to the
+    C output; ".S" files are copied to the asm output.
+
+ asm-alt-stub <filename>
+
+ When present, this command will cause the generation of an alternate
+ set of entry points (for computed-goto interpreters) or an alternate
+ jump table (for jump-table interpreters).
+
+ fallback-stub <filename>
+
+ Specifies a file to be used for the special FALLBACK tag on the "op"
+ command below. Intended to be used to transfer control to an alternate
+ interpreter to single-step a not-yet-implemented opcode. Note: should
+    not be used on RETURN-class instructions.
+
+ op-start <directory>
+
+ Indicates the start of the opcode list. Must precede any "op"
+ commands. The specified directory is the default location to pull
+ instruction files from.
+
+ op <opcode> <directory>|FALLBACK
+
+ Can only appear after "op-start" and before "op-end". Overrides the
+ default source file location of the specified opcode. The opcode
+ definition will come from the specified file, e.g. "op OP_NOP arm"
+ will load from "arm/OP_NOP.S". A substitution dictionary will be
+ applied (see below). If the special "FALLBACK" token is used instead of
+ a directory name, the source file specified in fallback-stub will instead
+ be used for this opcode.
+
+ alt <opcode> <directory>
+
+ Can only appear after "op-start" and before "op-end". Similar to the
+ "op" command above, but denotes a source file to override the entry
+ in the alternate handler table. The opcode definition will come from
+ the specified file, e.g. "alt OP_NOP arm" will load from
+ "arm/ALT_OP_NOP.S". A substitution dictionary will be applied
+ (see below).
+
+ op-end
+
+ Indicates the end of the opcode list. All kNumPackedOpcodes
+ opcodes are emitted when this is seen, followed by any code that
+ didn't fit inside the fixed-size instruction handler space.
+
+The order of "op" and "alt" directives is not significant; the generation
+tool will extract ordering info from the VM sources.
+
+Typically, the directory named in the "op-start" directive is the one in
+which most opcodes currently exist.
+
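+For illustration, a minimal (hypothetical) config using these commands
+might look like:
+
+    handler-style computed-goto
+    handler-size 128
+    import arm/header.S
+    fallback-stub arm/fallback.S
+    asm-alt-stub arm/alt_stub.S
+    op-start arm
+        op OP_NOP arm
+    op-end
+    import arm/footer.S
+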
+==== Instruction file format ====
+
+The assembly instruction files are simply fragments of assembly sources.
+The starting label will be provided by the generation tool, as will
+declarations for the segment type and alignment. The expected target
+assembler is GNU "as", but others will work (may require fiddling with
+some of the pseudo-ops emitted by the generation tool).
+
+A substitution dictionary is applied to all opcode fragments as they are
+appended to the output. Substitutions can look like "$value" or "${value}".
+
+The dictionary always includes:
+
+ $opcode - opcode name, e.g. "OP_NOP"
+ $opnum - opcode number, e.g. 0 for OP_NOP
+ $handler_size_bytes - max size of an instruction handler, in bytes
+ $handler_size_bits - max size of an instruction handler, log 2
+
+Both C and assembly sources will be passed through the C pre-processor,
+so you can take advantage of C-style comments and preprocessor directives
+like "#define".
+
+Some generator operations are available.
+
+ %include "filename" [subst-dict]
+
+ Includes the file, which should look like "arm/OP_NOP.S". You can
+ specify values for the substitution dictionary, using standard Python
+ syntax. For example, this:
+ %include "arm/unop.S" {"result":"r1"}
+ would insert "arm/unop.S" at the current file position, replacing
+ occurrences of "$result" with "r1".
+
+ %default <subst-dict>
+
+ Specify default substitution dictionary values, using standard Python
+ syntax. Useful if you want to have a "base" version and variants.
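+
+    For example, arm/binop.S (below) begins with:
+        %default {"preinstr":"", "result":"r0", "chkzero":"0"}
+    so an opcode that includes it only needs to supply "instr".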
+
+ %break
+
+ Identifies the split between the main portion of the instruction
+ handler (which must fit in "handler-size" bytes) and the "sister"
+ code, which is appended to the end of the instruction handler block.
+ In jump table implementations, %break is ignored.
+
+The generation tool does *not* print a warning if your instructions
+exceed "handler-size", but the VM will abort on startup if it detects an
+oversized handler. On architectures with fixed-width instructions this
+is easy to work with; on others you will need to count bytes.
+
+
+==== Using C constants from assembly sources ====
+
+The file "art/runtime/asm_support.h" has some definitions for constant
+values, structure sizes, and struct member offsets. The format is fairly
+restricted, as simple macros are used to massage it for use with both C
+(where it is verified) and assembly (where the definitions are used).
+
+If a constant in the file becomes out of sync, the VM will log an error
+message and abort during startup.
+
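+For example, this change adds (verified against the C++ side via ADD_TEST_EQ):
+
+    #define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+
+which the assembly then uses directly, as in arm/entry.S:
+
+    ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]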
+
+==== Development tips ====
+
+If you need to debug the initial piece of an opcode handler, and your
+debug code expands it beyond the handler size limit, you can insert a
+generic header at the top:
+
+ b ${opcode}_start
+%break
+${opcode}_start:
+
+If you already have a %break, it's okay to leave it in place -- the second
+%break is ignored.
+
+
+==== Rebuilding ====
+
+If you change any of the source file fragments, you need to rebuild the
+combined source files in the "out" directory. Make sure the files in
+"out" are editable, then:
+
+ $ cd mterp
+ $ ./rebuild.sh
+
+The ultimate goal is to have the build system generate the necessary
+output files without requiring this separate step, but we're not yet
+ready to require Python in the build.
+
+==== Interpreter Control ====
+
+The mterp fast interpreter achieves much of its performance advantage
+over the C++ interpreter through its efficient mechanism of
+transitioning from one Dalvik bytecode to the next. Mterp for ARM targets
+uses a computed-goto mechanism, in which the handler entrypoints are
+located at the base of the handler table + (opcode * 128).
+
+In normal operation, the dedicated register rIBASE
+(r8 for ARM, edx for x86) holds a mainHandlerTable. If we need to switch
+to a mode that requires inter-instruction checking, rIBASE is changed
+to altHandlerTable. Note that this change is not immediate. What is actually
+changed is the value of curHandlerTable - which is part of the interpBreak
+structure. Rather than explicitly check for changes, each thread will
+blindly refresh rIBASE at backward branches, exception throws and returns.
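+
+As a concrete sketch (using the register names from arm/header.S, and
+assuming the computed-goto layout described above), every handler ends
+with the pair:
+
+    GET_INST_OPCODE ip          @ ip<- opcode from rINST
+    GOTO_OPCODE ip              @ branch to rIBASE + (opcode * 128)
+
+so a mode switch takes effect the next time rIBASE is refreshed.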
diff --git a/runtime/interpreter/mterp/arm/alt_stub.S b/runtime/interpreter/mterp/arm/alt_stub.S
new file mode 100644
index 0000000..92ae0c6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/alt_stub.S
@@ -0,0 +1,12 @@
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (${opnum} * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
diff --git a/runtime/interpreter/mterp/arm/bincmp.S b/runtime/interpreter/mterp/arm/bincmp.S
new file mode 100644
index 0000000..474bc3c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/bincmp.S
@@ -0,0 +1,36 @@
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
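+ /*
+ * Hypothetical usage, following the README's %include syntax, e.g. for
+ * an "if-le" opcode:
+ * %include "arm/bincmp.S" {"revcmp":"gt"}
+ */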
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ mov${revcmp} r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ mov${revcmp} r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/arm/binop.S b/runtime/interpreter/mterp/arm/binop.S
new file mode 100644
index 0000000..eeb72ef
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binop.S
@@ -0,0 +1,35 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
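+ /*
+ * Hypothetical usage, e.g. for an "add-int" opcode:
+ * %include "arm/binop.S" {"instr":"add r0, r0, r1"}
+ */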
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm/binop2addr.S b/runtime/interpreter/mterp/arm/binop2addr.S
new file mode 100644
index 0000000..d09a43a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binop2addr.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit16.S b/runtime/interpreter/mterp/arm/binopLit16.S
new file mode 100644
index 0000000..065394e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopLit16.S
@@ -0,0 +1,29 @@
+%default {"result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit8.S b/runtime/interpreter/mterp/arm/binopLit8.S
new file mode 100644
index 0000000..ec0b3c4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopLit8.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "result":"r0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if $chkzero
+    @cmp r1, #0                         @ is second operand zero? (flags already set by movs above)
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S
new file mode 100644
index 0000000..57d43c6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopWide.S
@@ -0,0 +1,38 @@
+%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if $chkzero
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S
new file mode 100644
index 0000000..4e855f2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/binopWide2addr.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if $chkzero
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
new file mode 100644
index 0000000..4c5ffc5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/entry.S
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+
+/*
+ * On entry:
+ * r0 Thread* self
+ * r1 code_item
+ * r2 ShadowFrame*
+ * r3 JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+ .fnstart
+ .save {r4-r10,fp,lr}
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+ .pad #4
+ sub sp, sp, #4 @ align 64
+
+ /* Remember the return register */
+ str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the code_item */
+ str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+
+ /* set up "named" registers */
+ mov rSELF, r0
+ ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+    add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs (the Dalvik registers).
+ add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame
+ ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
+ add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
+ add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* start executing the instruction at rPC */
+ FETCH_INST @ load rINST from rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+ /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/arm/fallback.S b/runtime/interpreter/mterp/arm/fallback.S
new file mode 100644
index 0000000..44e7e12
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fallback.S
@@ -0,0 +1,3 @@
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
diff --git a/runtime/interpreter/mterp/arm/fbinop.S b/runtime/interpreter/mterp/arm/fbinop.S
new file mode 100644
index 0000000..594ee03
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinop.S
@@ -0,0 +1,23 @@
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $instr @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinop2addr.S b/runtime/interpreter/mterp/arm/fbinop2addr.S
new file mode 100644
index 0000000..b052a29
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinop2addr.S
@@ -0,0 +1,21 @@
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ $instr @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide.S b/runtime/interpreter/mterp/arm/fbinopWide.S
new file mode 100644
index 0000000..1bed817
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinopWide.S
@@ -0,0 +1,23 @@
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+    $instr                              @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
new file mode 100644
index 0000000..9f56986
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ $instr @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/footer.S b/runtime/interpreter/mterp/arm/footer.S
new file mode 100644
index 0000000..617f572
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/footer.S
@@ -0,0 +1,167 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+#define MTERP_LOGGING 0
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ cmp r0, #0 @ Exception pending?
+ beq MterpFallback @ If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException @ (self, shadow_frame)
+ cmp r0, #0
+ beq MterpExceptionReturn @ no local catch, back to caller.
+ ldr r0, [rFP, #OFF_FP_CODE_ITEM]
+ ldr r1, [rFP, #OFF_FP_DEX_PC]
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add rPC, r0, #CODEITEM_INSNS_OFFSET
+ add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+
+/*
+ * Check for suspend check request. Assumes rINST already loaded, rPC advanced and
+ * still needs to get the opcode and branch to it, and flags are in lr.
+ */
+MterpCheckSuspendAndContinue:
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ EXPORT_PC
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov r0, #0 @ signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov r0, #1 @ signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ str r0, [r2]
+ str r1, [r2, #4]
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ mov r0, #1 @ signal return to caller.
+MterpDone:
+ add sp, sp, #4 @ un-align 64
+ ldmfd sp!, {r4-r10,fp,pc} @ restore 9 regs and return
+
+
+ .fnend
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
diff --git a/runtime/interpreter/mterp/arm/funop.S b/runtime/interpreter/mterp/arm/funop.S
new file mode 100644
index 0000000..d7a0859
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/funop.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ $instr @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopNarrower.S b/runtime/interpreter/mterp/arm/funopNarrower.S
new file mode 100644
index 0000000..9daec28
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/funopNarrower.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ $instr @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S
new file mode 100644
index 0000000..087a1f2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/funopWider.S
@@ -0,0 +1,18 @@
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ $instr @ d0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
new file mode 100644
index 0000000..14319d9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/header.S
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+  handle invoke; allows higher-level code to create frame & shadow frame).
+
+  Once that's working, support direct entry code & eliminate shadow frame (and
+  excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them. If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF self (Thread) pointer
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+ r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+#define rREFS r11
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+/*
+ *
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
+.macro EXPORT_DEX_PC tmp
+ ldr \tmp, [rFP, #OFF_FP_CODE_ITEM]
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+ add \tmp, #CODEITEM_INSNS_OFFSET
+ sub \tmp, rPC, \tmp
+ asr \tmp, #1
+ str \tmp, [rFP, #OFF_FP_DEX_PC]
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ ldrh rINST, [rPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh rINST, [rPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh rINST, [rPC, #((\count)*2)]
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ add rPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ ldrh rINST, [rPC, \reg]!
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [rPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [rPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [rPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+.macro GOTO_OPCODE reg
+ add pc, rIBASE, \reg, lsl #${handler_size_bits}
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add pc, \base, \reg, lsl #${handler_size_bits}
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [rFP, \vreg, lsl #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [rFP, \vreg, lsl #2]
+ mov \reg, #0
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [rFP, \vreg, lsl #2]
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
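
A note on the OFF_FP scheme above, since it is easy to get the signs backwards: rFP points at the vregs array inside the shadow frame, so every other field sits at a negative offset, and EXPORT_DEX_PC turns the raw instruction pointer back into a code-unit offset. A minimal C sketch of that arithmetic, with illustrative constants standing in for the generated asm_support.h values:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the SHADOWFRAME_*_OFFSET / CODEITEM_INSNS_OFFSET
 * constants generated into asm_support.h; the real values come from the runtime. */
enum {
  kVregsOffset = 48,   /* SHADOWFRAME_VREGS_OFFSET (illustrative) */
  kDexPcOffset = 24,   /* SHADOWFRAME_DEX_PC_OFFSET (illustrative) */
  kInsnsOffset = 16,   /* CODEITEM_INSNS_OFFSET (illustrative) */
};

/* OFF_FP(a) = a - SHADOWFRAME_VREGS_OFFSET: rFP is frame + kVregsOffset, so a
 * field at offset "a" from the frame lives at rFP + (a - kVregsOffset). */
static int32_t off_fp(int32_t field_offset) {
  return field_offset - kVregsOffset;
}

/* EXPORT_DEX_PC: dex_pc = (rPC - (code_item + kInsnsOffset)) / 2, i.e. the
 * distance from the first code unit, in 16-bit units (the asr #1 above). */
static uint32_t dex_pc_from_ptr(const uint8_t* code_item, const uint16_t* pc) {
  const uint16_t* insns = (const uint16_t*)(code_item + kInsnsOffset);
  return (uint32_t)(pc - insns);
}

int main(void) {
  printf("OFF_FP_DEX_PC = %d\n", off_fp(kDexPcOffset));      /* -24 here */
  uint8_t code_item[64] = {0};
  const uint16_t* pc = (const uint16_t*)(code_item + kInsnsOffset) + 3;
  printf("dex pc = %u\n", dex_pc_from_ptr(code_item, pc));   /* 3 */
  return 0;
}
```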
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
new file mode 100644
index 0000000..7575865
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/invoke.S
@@ -0,0 +1,19 @@
+%default { "helper":"UndefinedInvokeHandler" }
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern $helper
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl $helper
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
diff --git a/runtime/interpreter/mterp/arm/op_add_double.S b/runtime/interpreter/mterp/arm/op_add_double.S
new file mode 100644
index 0000000..9332bf2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"faddd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_double_2addr.S b/runtime/interpreter/mterp/arm/op_add_double_2addr.S
new file mode 100644
index 0000000..3242c53
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"faddd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float.S b/runtime/interpreter/mterp/arm/op_add_float.S
new file mode 100644
index 0000000..afb7967
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fadds s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float_2addr.S b/runtime/interpreter/mterp/arm/op_add_float_2addr.S
new file mode 100644
index 0000000..0067b6a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fadds s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int.S b/runtime/interpreter/mterp/arm/op_add_int.S
new file mode 100644
index 0000000..1dcae7e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_2addr.S b/runtime/interpreter/mterp/arm/op_add_int_2addr.S
new file mode 100644
index 0000000..9ea98f1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit16.S b/runtime/interpreter/mterp/arm/op_add_int_lit16.S
new file mode 100644
index 0000000..5763ab8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit8.S b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
new file mode 100644
index 0000000..b84684a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long.S b/runtime/interpreter/mterp/arm/op_add_long.S
new file mode 100644
index 0000000..093223e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long_2addr.S b/runtime/interpreter/mterp/arm/op_add_long_2addr.S
new file mode 100644
index 0000000..c11e0af
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_add_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aget.S b/runtime/interpreter/mterp/arm/op_aget.S
new file mode 100644
index 0000000..11f7079
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget.S
@@ -0,0 +1,29 @@
+%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $load r2, [r0, #$data_offset] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
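
The bounds check above folds the `index < 0` and `index >= length` tests into the single unsigned compare/branch pair (`cmp r1, r3` / `bcs`): reinterpreted as unsigned, a negative index becomes a huge value and fails the same test. The equivalent C, as a sketch:

```c
#include <stdint.h>
#include <stdbool.h>

/* Mirrors "cmp r1, r3; bcs common_errArrayIndex": one unsigned compare
 * rejects both negative and too-large indices. For example,
 * index_ok(-1, 10) is false because (uint32_t)-1 == 0xFFFFFFFF. */
static bool index_ok(int32_t index, int32_t length) {
  return (uint32_t)index < (uint32_t)length;
}
```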
diff --git a/runtime/interpreter/mterp/arm/op_aget_boolean.S b/runtime/interpreter/mterp/arm/op_aget_boolean.S
new file mode 100644
index 0000000..8f678dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_byte.S b/runtime/interpreter/mterp/arm/op_aget_byte.S
new file mode 100644
index 0000000..a304650
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_byte.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_char.S b/runtime/interpreter/mterp/arm/op_aget_char.S
new file mode 100644
index 0000000..4908306
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_char.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_object.S b/runtime/interpreter/mterp/arm/op_aget_object.S
new file mode 100644
index 0000000..4e0aab5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_object.S
@@ -0,0 +1,21 @@
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ EXPORT_PC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ bl artAGetObjectFromMterp @ (array, index)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0
+ bne MterpException
+ SET_VREG_OBJECT r0, r9
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aget_short.S b/runtime/interpreter/mterp/arm/op_aget_short.S
new file mode 100644
index 0000000..b71e659
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_short.S
@@ -0,0 +1 @@
+%include "arm/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
new file mode 100644
index 0000000..caaec71
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aget_wide.S
@@ -0,0 +1,24 @@
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_and_int.S b/runtime/interpreter/mterp/arm/op_and_int.S
new file mode 100644
index 0000000..7c16d37
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_2addr.S b/runtime/interpreter/mterp/arm/op_and_int_2addr.S
new file mode 100644
index 0000000..0fbab02
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit16.S b/runtime/interpreter/mterp/arm/op_and_int_lit16.S
new file mode 100644
index 0000000..541e9b7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit8.S b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
new file mode 100644
index 0000000..d5783e5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long.S b/runtime/interpreter/mterp/arm/op_and_long.S
new file mode 100644
index 0000000..4ad5158
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long_2addr.S b/runtime/interpreter/mterp/arm/op_and_long_2addr.S
new file mode 100644
index 0000000..e23ea44
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_and_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aput.S b/runtime/interpreter/mterp/arm/op_aput.S
new file mode 100644
index 0000000..a511fa5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput.S
@@ -0,0 +1,29 @@
+%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ $store r2, [r0, #$data_offset] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_boolean.S b/runtime/interpreter/mterp/arm/op_aput_boolean.S
new file mode 100644
index 0000000..e86663f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_byte.S b/runtime/interpreter/mterp/arm/op_aput_byte.S
new file mode 100644
index 0000000..83694b7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_byte.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_char.S b/runtime/interpreter/mterp/arm/op_aput_char.S
new file mode 100644
index 0000000..3551cac
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_char.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_object.S b/runtime/interpreter/mterp/arm/op_aput_object.S
new file mode 100644
index 0000000..c539916
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_object.S
@@ -0,0 +1,14 @@
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpAputObject
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_short.S b/runtime/interpreter/mterp/arm/op_aput_short.S
new file mode 100644
index 0000000..0a0590e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_short.S
@@ -0,0 +1 @@
+%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_wide.S b/runtime/interpreter/mterp/arm/op_aput_wide.S
new file mode 100644
index 0000000..49839d1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_aput_wide.S
@@ -0,0 +1,24 @@
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_array_length.S b/runtime/interpreter/mterp/arm/op_array_length.S
new file mode 100644
index 0000000..43b1682
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_array_length.S
@@ -0,0 +1,13 @@
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r0, r1 @ r0<- vB (object ref)
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r3, r2 @ vB<- length
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_check_cast.S b/runtime/interpreter/mterp/arm/op_check_cast.S
new file mode 100644
index 0000000..3e3ac70
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_check_cast.S
@@ -0,0 +1,17 @@
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ GET_VREG r1, r1 @ r1<- object
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ bl MterpCheckCast @ (index, obj, method, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmp_long.S b/runtime/interpreter/mterp/arm/op_cmp_long.S
new file mode 100644
index 0000000..2b4c0ea
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmp_long.S
@@ -0,0 +1,56 @@
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .L${opcode}_less @ signed compare on high part
+ bgt .L${opcode}_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .L${opcode}_greater @ unsigned compare on low part
+ bne .L${opcode}_less
+ b .L${opcode}_finish @ equal; r1 already holds 0
+%break
+
+.L${opcode}_less:
+ mvn r1, #0 @ r1<- -1
+ @ Ideally we'd predicate the next mov to avoid the branch, but there's no
+ @ clean way to do it; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+.L${opcode}_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.L${opcode}_finish:
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
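
The comparison the handler performs is the 64-bit compare decomposed into 32-bit pieces: signed on the high words (the blt/bgt on r1/r3), unsigned on the low words (the bhi/bne after the subs). The same decision procedure in C, as a sketch:

```c
#include <stdint.h>

/* cmp-long semantics: returns -1, 0, or 1. */
static int32_t cmp_long(int64_t x, int64_t y) {
  int32_t xh = (int32_t)(x >> 32), yh = (int32_t)(y >> 32);
  if (xh < yh) return -1;        /* signed compare on high words */
  if (xh > yh) return 1;
  uint32_t xl = (uint32_t)x, yl = (uint32_t)y;
  if (xl < yl) return -1;        /* unsigned compare on low words */
  if (xl > yl) return 1;
  return 0;
}
```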
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_double.S b/runtime/interpreter/mterp/arm/op_cmpg_double.S
new file mode 100644
index 0000000..4b05c44
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpg_double.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_float.S b/runtime/interpreter/mterp/arm/op_cmpg_float.S
new file mode 100644
index 0000000..d5d2df2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpg_float.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_double.S b/runtime/interpreter/mterp/arm/op_cmpl_double.S
new file mode 100644
index 0000000..6ee53b3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpl_double.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_float.S b/runtime/interpreter/mterp/arm/op_cmpl_float.S
new file mode 100644
index 0000000..64535b6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_cmpl_float.S
@@ -0,0 +1,34 @@
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
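
The cmpl and cmpg handlers differ only in the value seeded into r0 before fmstat: -1 for cmpl, +1 for cmpg. An unordered (NaN) comparison satisfies none of the conditional moves, so it falls through to that bias, which is exactly the lt/gt flavor distinction in the Dalvik spec. In C, as a sketch:

```c
/* The default seeded before fmstat is what a NaN operand falls through to. */
static int cmpl_float(float x, float y) {
  if (x > y) return 1;
  if (x == y) return 0;
  return -1;               /* x < y, or unordered (NaN): the -1 default */
}

static int cmpg_float(float x, float y) {
  if (x < y) return -1;
  if (x == y) return 0;
  return 1;                /* x > y, or unordered (NaN): the +1 default */
}
```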
diff --git a/runtime/interpreter/mterp/arm/op_const.S b/runtime/interpreter/mterp/arm/op_const.S
new file mode 100644
index 0000000..de3e3c3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const.S
@@ -0,0 +1,9 @@
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_16.S b/runtime/interpreter/mterp/arm/op_const_16.S
new file mode 100644
index 0000000..59c6dac
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_16.S
@@ -0,0 +1,7 @@
+ /* const/16 vAA, #+BBBB */
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_4.S b/runtime/interpreter/mterp/arm/op_const_4.S
new file mode 100644
index 0000000..c177bb9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_4.S
@@ -0,0 +1,8 @@
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ SET_VREG r1, r0 @ fp[A]<- r1
+ GOTO_OPCODE ip @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_class.S b/runtime/interpreter/mterp/arm/op_const_class.S
new file mode 100644
index 0000000..0b111f4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_class.S
@@ -0,0 +1,13 @@
+ /* const/class vAA, Class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_high16.S b/runtime/interpreter/mterp/arm/op_const_high16.S
new file mode 100644
index 0000000..460d546
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_high16.S
@@ -0,0 +1,8 @@
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_string.S b/runtime/interpreter/mterp/arm/op_const_string.S
new file mode 100644
index 0000000..4b8302a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_string.S
@@ -0,0 +1,13 @@
+ /* const/string vAA, String@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
new file mode 100644
index 0000000..1a3d0b2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
@@ -0,0 +1,15 @@
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r2, 2 @ r2<- BBBB (high)
+ mov r1, rINST, lsr #8 @ r1<- AA
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 3 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S
new file mode 100644
index 0000000..2cdc426
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide.S
@@ -0,0 +1,13 @@
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (low middle)
+ FETCH r2, 3 @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH r3, 4 @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
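
const-wide stitches its 64-bit literal together from four 16-bit code units; the two orr instructions build the low and high words into r0/r1. The same assembly of the literal in C, as a sketch:

```c
#include <stdint.h>

/* Builds #+HHHHhhhhBBBBbbbb from the four fetched code units, mirroring the
 * two "orr rX, rY, rZ, lsl #16" instructions above. */
static int64_t const_wide(uint16_t bbbb, uint16_t BBBB,
                          uint16_t hhhh, uint16_t HHHH) {
  uint32_t lo = (uint32_t)bbbb | ((uint32_t)BBBB << 16);   /* low word  */
  uint32_t hi = (uint32_t)hhhh | ((uint32_t)HHHH << 16);   /* high word */
  return (int64_t)(((uint64_t)hi << 32) | lo);
}
```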
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S
new file mode 100644
index 0000000..56bfc17
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide_16.S
@@ -0,0 +1,9 @@
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S
new file mode 100644
index 0000000..36d4628
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide_32.S
@@ -0,0 +1,11 @@
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH r0, 1 @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S r2, 2 @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
new file mode 100644
index 0000000..bee592d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
@@ -0,0 +1,10 @@
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_div_double.S b/runtime/interpreter/mterp/arm/op_div_double.S
new file mode 100644
index 0000000..5147550
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"fdivd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_double_2addr.S b/runtime/interpreter/mterp/arm/op_div_double_2addr.S
new file mode 100644
index 0000000..b812f17
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"fdivd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float.S b/runtime/interpreter/mterp/arm/op_div_float.S
new file mode 100644
index 0000000..0f24d11
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fdivs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float_2addr.S b/runtime/interpreter/mterp/arm/op_div_float_2addr.S
new file mode 100644
index 0000000..a1dbf01
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fdivs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_int.S b/runtime/interpreter/mterp/arm/op_div_int.S
new file mode 100644
index 0000000..251064b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int.S
@@ -0,0 +1,30 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
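
Whichever divide executes — sdiv or the __aeabi_idiv helper — the zero check has already been taken care of, and the single overflow case INT_MIN / -1 wraps to INT_MIN on both paths, which is what Dalvik requires. A C sketch of the required semantics (the explicit guard is only there because plain C division would be undefined in that case):

```c
#include <stdint.h>
#include <stdbool.h>

/* Dalvik div-int: throw on a zero divisor (the beq common_errDivideByZero
 * above), and INT32_MIN / -1 must yield INT32_MIN rather than trap. */
static bool div_int(int32_t num, int32_t den, int32_t* out) {
  if (den == 0) return false;            /* caller throws ArithmeticException */
  if (num == INT32_MIN && den == -1) {   /* would overflow; wrap per spec */
    *out = INT32_MIN;
    return true;
  }
  *out = num / den;
  return true;
}
```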
diff --git a/runtime/interpreter/mterp/arm/op_div_int_2addr.S b/runtime/interpreter/mterp/arm/op_div_int_2addr.S
new file mode 100644
index 0000000..9be4cd8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int_2addr.S
@@ -0,0 +1,29 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit16.S b/runtime/interpreter/mterp/arm/op_div_int_lit16.S
new file mode 100644
index 0000000..d9bc7d6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int_lit16.S
@@ -0,0 +1,28 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit8.S b/runtime/interpreter/mterp/arm/op_div_int_lit8.S
new file mode 100644
index 0000000..5d2dbd3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_int_lit8.S
@@ -0,0 +1,29 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign-extended), flags set
+ @cmp r1, #0 @ redundant: the movs above already set the flags
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_long.S b/runtime/interpreter/mterp/arm/op_div_long.S
new file mode 100644
index 0000000..0f21a84
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_long_2addr.S b/runtime/interpreter/mterp/arm/op_div_long_2addr.S
new file mode 100644
index 0000000..e172b29
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_div_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_float.S b/runtime/interpreter/mterp/arm/op_double_to_float.S
new file mode 100644
index 0000000..e327000
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_double_to_float.S
@@ -0,0 +1 @@
+%include "arm/funopNarrower.S" {"instr":"fcvtsd s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_int.S b/runtime/interpreter/mterp/arm/op_double_to_int.S
new file mode 100644
index 0000000..aa035de
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_double_to_int.S
@@ -0,0 +1 @@
+%include "arm/funopNarrower.S" {"instr":"ftosizd s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_long.S b/runtime/interpreter/mterp/arm/op_double_to_long.S
new file mode 100644
index 0000000..b100810
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_double_to_long.S
@@ -0,0 +1,52 @@
+@include "arm/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+%include "arm/unopWide.S" {"instr":"bl d2l_doconv"}
+
+%break
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function doesn't do this for us.
+ */
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ mov r3, #0x43000000 @ maxlong, as a double (high word)
+ add r3, #0x00e00000 @ 0x43e00000
+ mov r2, #0 @ maxlong, as a double (low word)
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save a copy of r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r3, #0xc3000000 @ minlong, as a double (high word)
+ add r3, #0x00e00000 @ 0xc3e00000
+ mov r2, #0 @ minlong, as a double (low word)
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
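
d2l_doconv exists because the bare EABI conversion doesn't saturate: the three compares clamp at ±2^63 and map NaN to zero, per the spec. The constants built in r2/r3 (high words 0x43e00000 and 0xc3e00000) are just ±2^63 as doubles. Equivalent logic in C, as a sketch:

```c
#include <stdint.h>

/* Saturating double-to-long: clamp to the long range and map NaN to zero.
 * 9223372036854775808.0 is 2^63, the double with high word 0x43e00000
 * built in the assembly above. */
static int64_t d2l(double d) {
  if (d >= 9223372036854775808.0) return INT64_MAX;    /* >= maxlong */
  if (d <= -9223372036854775808.0) return INT64_MIN;   /* <= minlong */
  if (d != d) return 0;                                /* NaN */
  return (int64_t)d;
}
```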
diff --git a/runtime/interpreter/mterp/arm/op_fill_array_data.S b/runtime/interpreter/mterp/arm/op_fill_array_data.S
new file mode 100644
index 0000000..e1ca85c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_fill_array_data.S
@@ -0,0 +1,14 @@
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG r0, r3 @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ bl MterpFillArrayData @ (obj, payload)
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq MterpPossibleException @ exception?
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array.S b/runtime/interpreter/mterp/arm/op_filled_new_array.S
new file mode 100644
index 0000000..1075f0c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_filled_new_array.S
@@ -0,0 +1,19 @@
+%default { "helper":"MterpFilledNewArray" }
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern $helper
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rSELF
+ bl $helper
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array_range.S b/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
new file mode 100644
index 0000000..16567af
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
@@ -0,0 +1 @@
+%include "arm/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/arm/op_float_to_double.S b/runtime/interpreter/mterp/arm/op_float_to_double.S
new file mode 100644
index 0000000..fb1892b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_float_to_double.S
@@ -0,0 +1 @@
+%include "arm/funopWider.S" {"instr":"fcvtds d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_int.S b/runtime/interpreter/mterp/arm/op_float_to_int.S
new file mode 100644
index 0000000..aab8716
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_float_to_int.S
@@ -0,0 +1 @@
+%include "arm/funop.S" {"instr":"ftosizs s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_long.S b/runtime/interpreter/mterp/arm/op_float_to_long.S
new file mode 100644
index 0000000..24416d3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_float_to_long.S
@@ -0,0 +1,39 @@
+@include "arm/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+%include "arm/unopWider.S" {"instr":"bl f2l_doconv"}
+
+%break
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function doesn't do this for us.
+ */
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
diff --git a/runtime/interpreter/mterp/arm/op_goto.S b/runtime/interpreter/mterp/arm/op_goto.S
new file mode 100644
index 0000000..9b3632a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_goto.S
@@ -0,0 +1,28 @@
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ @ If backwards branch, refresh rIBASE
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ @ If backwards branch, refresh rIBASE
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/arm/op_goto_16.S b/runtime/interpreter/mterp/arm/op_goto_16.S
new file mode 100644
index 0000000..2231acd
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_goto_16.S
@@ -0,0 +1,23 @@
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_SUSPEND
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
diff --git a/runtime/interpreter/mterp/arm/op_goto_32.S b/runtime/interpreter/mterp/arm/op_goto_32.S
new file mode 100644
index 0000000..6b72ff5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_goto_32.S
@@ -0,0 +1,32 @@
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
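
All three goto flavors double the signed code-unit offset to bytes with an adds and use the resulting flags to poll for suspension only on non-forward branches; goto/32 may legally branch to itself, hence its ble where the shorter forms use bmi. The control logic in C, as a sketch with a hypothetical stand-in for MterpCheckSuspendAndContinue:

```c
#include <stdint.h>

static void check_suspend(void) { /* hypothetical suspend poll */ }

/* Offsets are signed 16-bit code units; bytes = 2 * units. Backward (and,
 * for goto/32, self-targeting) branches poll for suspension first. */
static const uint16_t* do_goto(const uint16_t* pc, int32_t code_units,
                               int allow_self /* 1 for goto/32 */) {
  const uint16_t* next = pc + code_units;
  if (allow_self ? (code_units <= 0) : (code_units < 0)) {
    check_suspend();
  }
  return next;
}
```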
diff --git a/runtime/interpreter/mterp/arm/op_if_eq.S b/runtime/interpreter/mterp/arm/op_if_eq.S
new file mode 100644
index 0000000..5685686
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_eq.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_if_eqz.S b/runtime/interpreter/mterp/arm/op_if_eqz.S
new file mode 100644
index 0000000..2a9c0f9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_eqz.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ge.S b/runtime/interpreter/mterp/arm/op_if_ge.S
new file mode 100644
index 0000000..60a0307
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_ge.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gez.S b/runtime/interpreter/mterp/arm/op_if_gez.S
new file mode 100644
index 0000000..981cdec
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_gez.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gt.S b/runtime/interpreter/mterp/arm/op_if_gt.S
new file mode 100644
index 0000000..ca50cd7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_gt.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gtz.S b/runtime/interpreter/mterp/arm/op_if_gtz.S
new file mode 100644
index 0000000..c621812
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_gtz.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_le.S b/runtime/interpreter/mterp/arm/op_if_le.S
new file mode 100644
index 0000000..7e060f2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_le.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lez.S b/runtime/interpreter/mterp/arm/op_if_lez.S
new file mode 100644
index 0000000..f92be23
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_lez.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lt.S b/runtime/interpreter/mterp/arm/op_if_lt.S
new file mode 100644
index 0000000..213344d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_lt.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ltz.S b/runtime/interpreter/mterp/arm/op_if_ltz.S
new file mode 100644
index 0000000..dfd4e44
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_ltz.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ne.S b/runtime/interpreter/mterp/arm/op_if_ne.S
new file mode 100644
index 0000000..4a58b4a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_ne.S
@@ -0,0 +1 @@
+%include "arm/bincmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_if_nez.S b/runtime/interpreter/mterp/arm/op_if_nez.S
new file mode 100644
index 0000000..d864ef4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_if_nez.S
@@ -0,0 +1 @@
+%include "arm/zcmp.S" { "revcmp":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_iget.S b/runtime/interpreter/mterp/arm/op_iget.S
new file mode 100644
index 0000000..c7f777b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget.S
@@ -0,0 +1,26 @@
+%default { "is_object":"0", "helper":"artGet32InstanceFromCode"}
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl $helper
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if $is_object
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean.S b/runtime/interpreter/mterp/arm/op_iget_boolean.S
new file mode 100644
index 0000000..628f40a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
new file mode 100644
index 0000000..0ae4843
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte.S b/runtime/interpreter/mterp/arm/op_iget_byte.S
new file mode 100644
index 0000000..c4e08e2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_byte.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetByteInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte_quick.S b/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
new file mode 100644
index 0000000..e1b3083
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrsb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char.S b/runtime/interpreter/mterp/arm/op_iget_char.S
new file mode 100644
index 0000000..5e8da66
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_char.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetCharInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char_quick.S b/runtime/interpreter/mterp/arm/op_iget_char_quick.S
new file mode 100644
index 0000000..b44d8f1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_char_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object.S b/runtime/interpreter/mterp/arm/op_iget_object.S
new file mode 100644
index 0000000..1cf2e3c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_object.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object_quick.S b/runtime/interpreter/mterp/arm/op_iget_object_quick.S
new file mode 100644
index 0000000..fe29106
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_object_quick.S
@@ -0,0 +1,17 @@
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r0, r2 @ r0<- object we're operating on
+ cmp r0, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ bl artIGetObjectFromMterp @ (obj, offset)
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_quick.S b/runtime/interpreter/mterp/arm/op_iget_quick.S
new file mode 100644
index 0000000..0eaf364
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_quick.S
@@ -0,0 +1,14 @@
+%default { "load":"ldr" }
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ $load r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
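
The quick variants skip resolution entirely: by the time an instruction has been rewritten to iget-quick, CCCC is already a raw byte offset into the object, so the handler is a null check plus one load. A sketch with $load fixed to the 32-bit case:

    #include <stdint.h>

    /* iget-quick vA, vB, offset@CCCC; returns 0, or -1 for the null case
     * that the assembly routes to common_errNullObject. */
    static int iget_quick(uint32_t* fp, uint16_t inst, uint16_t offset) {
        uint32_t b = inst >> 12;                   /* mov r2, rINST, lsr #12 */
        uint32_t a = (inst >> 8) & 0xf;            /* ubfx r2, rINST, #8, #4 */
        const char* obj = (const char*)(uintptr_t)fp[b];
        if (obj == 0)
            return -1;                             /* beq common_errNullObject */
        fp[a] = *(const uint32_t*)(obj + offset);  /* $load r0, [r3, r1] */
        return 0;
    }
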
diff --git a/runtime/interpreter/mterp/arm/op_iget_short.S b/runtime/interpreter/mterp/arm/op_iget_short.S
new file mode 100644
index 0000000..460f045
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_short.S
@@ -0,0 +1 @@
+%include "arm/op_iget.S" { "helper":"artGetShortInstanceFromCode" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_short_quick.S b/runtime/interpreter/mterp/arm/op_iget_short_quick.S
new file mode 100644
index 0000000..1831b99
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_short_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iget_quick.S" { "load":"ldrsh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
new file mode 100644
index 0000000..f8d2f41
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_wide.S
@@ -0,0 +1,22 @@
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGet64InstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpException @ bail out
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
new file mode 100644
index 0000000..4d6976e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
@@ -0,0 +1,13 @@
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH ip, 1 @ ip<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, ip] @ r0/r1<- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_instance_of.S b/runtime/interpreter/mterp/arm/op_instance_of.S
new file mode 100644
index 0000000..e94108c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_instance_of.S
@@ -0,0 +1,24 @@
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- vB (object)
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ mov r9, rINST, lsr #8 @ r9<- A+
+ and r9, r9, #15 @ r9<- A
+ bl MterpInstanceOf @ (index, obj, method, self)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0 @ exception pending?
+ bne MterpException
+ ADVANCE 2 @ advance rPC
+ SET_VREG r0, r9 @ vA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_int_to_byte.S b/runtime/interpreter/mterp/arm/op_int_to_byte.S
new file mode 100644
index 0000000..059d5c2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_byte.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"sxtb r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_char.S b/runtime/interpreter/mterp/arm/op_int_to_char.S
new file mode 100644
index 0000000..83a0c19
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_char.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"uxth r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_double.S b/runtime/interpreter/mterp/arm/op_int_to_double.S
new file mode 100644
index 0000000..810c2e4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_double.S
@@ -0,0 +1 @@
+%include "arm/funopWider.S" {"instr":"fsitod d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_float.S b/runtime/interpreter/mterp/arm/op_int_to_float.S
new file mode 100644
index 0000000..f41654c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_float.S
@@ -0,0 +1 @@
+%include "arm/funop.S" {"instr":"fsitos s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_long.S b/runtime/interpreter/mterp/arm/op_int_to_long.S
new file mode 100644
index 0000000..b5aed8e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_long.S
@@ -0,0 +1 @@
+%include "arm/unopWider.S" {"instr":"mov r1, r0, asr #31"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_short.S b/runtime/interpreter/mterp/arm/op_int_to_short.S
new file mode 100644
index 0000000..717bd96
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_int_to_short.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"sxth r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct.S b/runtime/interpreter/mterp/arm/op_invoke_direct.S
new file mode 100644
index 0000000..1edf221
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_direct.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct_range.S b/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
new file mode 100644
index 0000000..3097b8e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface.S b/runtime/interpreter/mterp/arm/op_invoke_interface.S
new file mode 100644
index 0000000..f6d565b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_interface.S
@@ -0,0 +1,8 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeInterface" }
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface_range.S b/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
new file mode 100644
index 0000000..c8443b0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static.S b/runtime/interpreter/mterp/arm/op_invoke_static.S
new file mode 100644
index 0000000..c3cefcf
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_static.S
@@ -0,0 +1,2 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeStatic" }
+
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static_range.S b/runtime/interpreter/mterp/arm/op_invoke_static_range.S
new file mode 100644
index 0000000..dd60d7b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_static_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super.S b/runtime/interpreter/mterp/arm/op_invoke_super.S
new file mode 100644
index 0000000..92ef2a4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_super.S
@@ -0,0 +1,8 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeSuper" }
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super_range.S b/runtime/interpreter/mterp/arm/op_invoke_super_range.S
new file mode 100644
index 0000000..9e4fb1c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_super_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual.S b/runtime/interpreter/mterp/arm/op_invoke_virtual.S
new file mode 100644
index 0000000..5b893ff
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual.S
@@ -0,0 +1,8 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtual" }
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
new file mode 100644
index 0000000..020e8b8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
new file mode 100644
index 0000000..2b42a78
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
new file mode 100644
index 0000000..42f2ded
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
@@ -0,0 +1 @@
+%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/arm/op_iput.S b/runtime/interpreter/mterp/arm/op_iput.S
new file mode 100644
index 0000000..d224cd8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput.S
@@ -0,0 +1,22 @@
+%default { "is_object":"0", "handler":"artSet32InstanceFromMterp" }
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short (iput-object has a dedicated handler)
+ */
+ /* op vA, vB, field@CCCC */
+ .extern $handler
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl $handler
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean.S b/runtime/interpreter/mterp/arm/op_iput_boolean.S
new file mode 100644
index 0000000..c9e8589
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
new file mode 100644
index 0000000..f0a2777
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte.S b/runtime/interpreter/mterp/arm/op_iput_byte.S
new file mode 100644
index 0000000..c9e8589
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_byte.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet8InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte_quick.S b/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
new file mode 100644
index 0000000..f0a2777
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char.S b/runtime/interpreter/mterp/arm/op_iput_char.S
new file mode 100644
index 0000000..5046f6b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_char.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char_quick.S b/runtime/interpreter/mterp/arm/op_iput_char_quick.S
new file mode 100644
index 0000000..5212fc3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_char_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_object.S b/runtime/interpreter/mterp/arm/op_iput_object.S
new file mode 100644
index 0000000..d942e84
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_object.S
@@ -0,0 +1,11 @@
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpIputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_object_quick.S b/runtime/interpreter/mterp/arm/op_iput_object_quick.S
new file mode 100644
index 0000000..876b3da
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_object_quick.S
@@ -0,0 +1,10 @@
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpIputObjectQuick
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_quick.S b/runtime/interpreter/mterp/arm/op_iput_quick.S
new file mode 100644
index 0000000..98c8150
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_quick.S
@@ -0,0 +1,14 @@
+%default { "store":"str" }
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $store r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_short.S b/runtime/interpreter/mterp/arm/op_iput_short.S
new file mode 100644
index 0000000..5046f6b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_short.S
@@ -0,0 +1 @@
+%include "arm/op_iput.S" { "handler":"artSet16InstanceFromMterp" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_short_quick.S b/runtime/interpreter/mterp/arm/op_iput_short_quick.S
new file mode 100644
index 0000000..5212fc3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_short_quick.S
@@ -0,0 +1 @@
+%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide.S b/runtime/interpreter/mterp/arm/op_iput_wide.S
new file mode 100644
index 0000000..8bbd63e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_wide.S
@@ -0,0 +1,16 @@
+ /* iput-wide vA, vB, field@CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet64InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
new file mode 100644
index 0000000..a2fc9e1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
@@ -0,0 +1,13 @@
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r3, 1 @ r3<- field byte offset
+ GET_VREG r2, r2 @ r2<- fp[B], the object pointer
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ cmp r2, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[A]
+ ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_long_to_double.S b/runtime/interpreter/mterp/arm/op_long_to_double.S
new file mode 100644
index 0000000..1d48a2a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_long_to_double.S
@@ -0,0 +1,27 @@
+%default {}
+ /*
+ * Specialised 64-bit floating point operation.
+ *
+ * Note: The result will be returned in d2.
+ *
+ * For: long-to-double
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ vldr d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ vcvt.f64.s32 d1, s1 @ d1<- (double)(vBh)
+ vcvt.f64.u32 d2, s0 @ d2<- (double)(vBl)
+ vldr d3, constval$opcode
+ vmla.f64 d2, d1, d3 @ d2<- vBh*2^32 + vBl
+
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ vstr.64 d2, [r9] @ vA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+ /* literal pool helper */
+constval${opcode}:
+ .8byte 0x41f0000000000000
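
The literal-pool constant 0x41f0000000000000 is the IEEE-754 encoding of 2^32: the long is split into a signed high word and an unsigned low word, each converted exactly, then recombined with a single multiply-accumulate, so only the final add rounds. A C sketch of the same computation:

    #include <stdint.h>
    #include <string.h>

    static double long_to_double(int64_t v) {
        int32_t  hi = (int32_t)(v >> 32);       /* vcvt.f64.s32 d1, s1 */
        uint32_t lo = (uint32_t)v;              /* vcvt.f64.u32 d2, s0 */
        uint64_t bits = 0x41f0000000000000ULL;  /* the constval literal: 2^32 */
        double two32;
        memcpy(&two32, &bits, sizeof two32);
        /* vmla.f64 d2, d1, d3: d2 <- d2 + d1*d3. The conversions and the
         * scaling by a power of two are exact, so only this add rounds. */
        return (double)lo + (double)hi * two32;
    }
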
diff --git a/runtime/interpreter/mterp/arm/op_long_to_float.S b/runtime/interpreter/mterp/arm/op_long_to_float.S
new file mode 100644
index 0000000..efa5a66
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_long_to_float.S
@@ -0,0 +1 @@
+%include "arm/unopNarrower.S" {"instr":"bl __aeabi_l2f"}
diff --git a/runtime/interpreter/mterp/arm/op_long_to_int.S b/runtime/interpreter/mterp/arm/op_long_to_int.S
new file mode 100644
index 0000000..3e91f23
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_long_to_int.S
@@ -0,0 +1,2 @@
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+%include "arm/op_move.S"
diff --git a/runtime/interpreter/mterp/arm/op_monitor_enter.S b/runtime/interpreter/mterp/arm/op_monitor_enter.S
new file mode 100644
index 0000000..3c34f75
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_monitor_enter.S
@@ -0,0 +1,14 @@
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artLockObjectFromCode
+ cmp r0, #0
+ bne MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_monitor_exit.S b/runtime/interpreter/mterp/arm/op_monitor_exit.S
new file mode 100644
index 0000000..fc7cef5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_monitor_exit.S
@@ -0,0 +1,18 @@
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ bne MterpException
+ FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move.S b/runtime/interpreter/mterp/arm/op_move.S
new file mode 100644
index 0000000..dfecc24
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_16.S b/runtime/interpreter/mterp/arm/op_move_16.S
new file mode 100644
index 0000000..78138a2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH r1, 2 @ r1<- BBBB
+ FETCH r0, 1 @ r0<- AAAA
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AAAA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_exception.S b/runtime/interpreter/mterp/arm/op_move_exception.S
new file mode 100644
index 0000000..0242e26
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_exception.S
@@ -0,0 +1,9 @@
+ /* move-exception vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
+ GOTO_OPCODE ip @ jump to next instruction
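
The ordering matters: the exception object is stored to the vreg before the thread-local slot is cleared. The handler reduces to two statements; the Thread layout below is a hypothetical stand-in:

    typedef struct Thread { void* exception; } Thread;  /* hypothetical layout */

    /* move-exception vAA */
    static void move_exception(void** fp, unsigned aa, Thread* self) {
        fp[aa] = self->exception;  /* SET_VREG_OBJECT r3, r2 */
        self->exception = 0;       /* str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] */
    }
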
diff --git a/runtime/interpreter/mterp/arm/op_move_from16.S b/runtime/interpreter/mterp/arm/op_move_from16.S
new file mode 100644
index 0000000..3e79417
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_from16.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH r1, 1 @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_object.S b/runtime/interpreter/mterp/arm/op_move_object.S
new file mode 100644
index 0000000..16de57b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_object.S
@@ -0,0 +1 @@
+%include "arm/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_16.S b/runtime/interpreter/mterp/arm/op_move_object_16.S
new file mode 100644
index 0000000..2534300
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_object_16.S
@@ -0,0 +1 @@
+%include "arm/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_from16.S b/runtime/interpreter/mterp/arm/op_move_object_from16.S
new file mode 100644
index 0000000..9e0cf02
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_object_from16.S
@@ -0,0 +1 @@
+%include "arm/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result.S b/runtime/interpreter/mterp/arm/op_move_result.S
new file mode 100644
index 0000000..f2586a0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_result.S
@@ -0,0 +1,14 @@
+%default { "is_object":"0" }
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
+ ldr r0, [r0] @ r0 <- result.i.
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_result_object.S b/runtime/interpreter/mterp/arm/op_move_result_object.S
new file mode 100644
index 0000000..643296a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_result_object.S
@@ -0,0 +1 @@
+%include "arm/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S
new file mode 100644
index 0000000..c64103c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_result_wide.S
@@ -0,0 +1,9 @@
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S
new file mode 100644
index 0000000..1345b95
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_wide.S
@@ -0,0 +1,11 @@
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S
new file mode 100644
index 0000000..133a4c3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_wide_16.S
@@ -0,0 +1,11 @@
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 2 @ r3<- BBBB
+ FETCH r2, 1 @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
new file mode 100644
index 0000000..f2ae785
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
@@ -0,0 +1,11 @@
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 1 @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_mul_double.S b/runtime/interpreter/mterp/arm/op_mul_double.S
new file mode 100644
index 0000000..530e85a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"fmuld d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_double_2addr.S b/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
new file mode 100644
index 0000000..da1abc6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"fmuld d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float.S b/runtime/interpreter/mterp/arm/op_mul_float.S
new file mode 100644
index 0000000..6a72e6f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fmuls s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float_2addr.S b/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
new file mode 100644
index 0000000..edb5101
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fmuls s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int.S b/runtime/interpreter/mterp/arm/op_mul_int.S
new file mode 100644
index 0000000..d6151d4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binop.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_2addr.S b/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
new file mode 100644
index 0000000..66a797d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binop2addr.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit16.S b/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
new file mode 100644
index 0000000..4e40c43
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binopLit16.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit8.S b/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
new file mode 100644
index 0000000..dbafae9
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
@@ -0,0 +1,2 @@
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+%include "arm/binopLit8.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_long.S b/runtime/interpreter/mterp/arm/op_mul_long.S
new file mode 100644
index 0000000..9e83778
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_long.S
@@ -0,0 +1,36 @@
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
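
In C, the same decomposition (one full-width product for the low halves plus a 32-bit cross term folded into the high word) reads as follows; X/W and Z/Y name the halves as in the diagram above:

    #include <stdint.h>

    static uint64_t mul_long(uint64_t a, uint64_t b) {
        uint32_t x = (uint32_t)a, w = (uint32_t)(a >> 32);  /* a = WX */
        uint32_t z = (uint32_t)b, y = (uint32_t)(b >> 32);  /* b = YZ */
        uint64_t zx = (uint64_t)z * x;        /* umull r9, r10, r2, r0 */
        uint32_t cross = y * x + z * w;       /* mul ip, r2, r1; mla r2, r0, r3, ip */
        return zx + ((uint64_t)cross << 32);  /* add r10, r2, r10 */
    }
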
diff --git a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
new file mode 100644
index 0000000..789dbd3
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
@@ -0,0 +1,24 @@
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See op_mul_long for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
+ /* mul-long/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vB/vB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vA/vA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vA/vA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_neg_double.S b/runtime/interpreter/mterp/arm/op_neg_double.S
new file mode 100644
index 0000000..33e609c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_double.S
@@ -0,0 +1 @@
+%include "arm/unopWide.S" {"instr":"add r1, r1, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_float.S b/runtime/interpreter/mterp/arm/op_neg_float.S
new file mode 100644
index 0000000..993583f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_float.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"add r0, r0, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_int.S b/runtime/interpreter/mterp/arm/op_neg_int.S
new file mode 100644
index 0000000..ec0b253
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_int.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"rsb r0, r0, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_long.S b/runtime/interpreter/mterp/arm/op_neg_long.S
new file mode 100644
index 0000000..dab2eb4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_neg_long.S
@@ -0,0 +1 @@
+%include "arm/unopWide.S" {"preinstr":"rsbs r0, r0, #0", "instr":"rsc r1, r1, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_new_array.S b/runtime/interpreter/mterp/arm/op_new_array.S
new file mode 100644
index 0000000..8bb792c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_new_array.S
@@ -0,0 +1,19 @@
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpNewArray
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_new_instance.S b/runtime/interpreter/mterp/arm/op_new_instance.S
new file mode 100644
index 0000000..95d4be8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_new_instance.S
@@ -0,0 +1,14 @@
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rSELF
+ mov r2, rINST
+ bl MterpNewInstance @ (shadow_frame, self, inst_data)
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_nop.S b/runtime/interpreter/mterp/arm/op_nop.S
new file mode 100644
index 0000000..af0f88f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_nop.S
@@ -0,0 +1,3 @@
+ FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ GOTO_OPCODE ip @ execute it
diff --git a/runtime/interpreter/mterp/arm/op_not_int.S b/runtime/interpreter/mterp/arm/op_not_int.S
new file mode 100644
index 0000000..816485a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_not_int.S
@@ -0,0 +1 @@
+%include "arm/unop.S" {"instr":"mvn r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_not_long.S b/runtime/interpreter/mterp/arm/op_not_long.S
new file mode 100644
index 0000000..49a5905
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_not_long.S
@@ -0,0 +1 @@
+%include "arm/unopWide.S" {"preinstr":"mvn r0, r0", "instr":"mvn r1, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int.S b/runtime/interpreter/mterp/arm/op_or_int.S
new file mode 100644
index 0000000..b046e8d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_2addr.S b/runtime/interpreter/mterp/arm/op_or_int_2addr.S
new file mode 100644
index 0000000..493c59f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit16.S b/runtime/interpreter/mterp/arm/op_or_int_lit16.S
new file mode 100644
index 0000000..0a01db8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit8.S b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
new file mode 100644
index 0000000..2d85038
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long.S b/runtime/interpreter/mterp/arm/op_or_long.S
new file mode 100644
index 0000000..048c45c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"orr r0, r0, r2", "instr":"orr r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long_2addr.S b/runtime/interpreter/mterp/arm/op_or_long_2addr.S
new file mode 100644
index 0000000..9395346
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_or_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"orr r0, r0, r2", "instr":"orr r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_packed_switch.S b/runtime/interpreter/mterp/arm/op_packed_switch.S
new file mode 100644
index 0000000..1e3370e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_packed_switch.S
@@ -0,0 +1,39 @@
+%default { "func":"MterpDoPackedSwitch" }
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl $func @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl $func @ r0<- code-unit branch offset
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
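
The two paths differ only in how suspension is checked; the table walk itself lives in the $func helper. Assuming the packed-switch payload layout from the Dalvik bytecode spec (a 16-bit ident and size, a 32-bit first_key, then size 32-bit relative targets), the helper computes roughly:

    #include <stdint.h>
    #include <string.h>

    /* Returns a branch offset in 16-bit code units; 3 is the width of the
     * packed-switch instruction itself, i.e. fall through on no match. */
    static int32_t do_packed_switch(const uint16_t* payload, int32_t value) {
        int32_t size = payload[1];              /* payload[0] is the ident */
        int32_t first_key;
        memcpy(&first_key, payload + 2, sizeof first_key);
        uint32_t index = (uint32_t)(value - first_key);
        if (index >= (uint32_t)size)
            return 3;                           /* default case */
        int32_t target;                         /* targets are 2 code units each */
        memcpy(&target, payload + 4 + 2 * index, sizeof target);
        return target;
    }
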
diff --git a/runtime/interpreter/mterp/arm/op_rem_double.S b/runtime/interpreter/mterp/arm/op_rem_double.S
new file mode 100644
index 0000000..b539221
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_double.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a double remainder function, but libm does */
+%include "arm/binopWide.S" {"instr":"bl fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_double_2addr.S b/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
new file mode 100644
index 0000000..372ef1d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a double remainder function, but libm does */
+%include "arm/binopWide2addr.S" {"instr":"bl fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float.S b/runtime/interpreter/mterp/arm/op_rem_float.S
new file mode 100644
index 0000000..7bd10de
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_float.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a float remainder function, but libm does */
+%include "arm/binop.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float_2addr.S b/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
new file mode 100644
index 0000000..93c5fae
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
@@ -0,0 +1,2 @@
+/* EABI doesn't define a float remainder function, but libm does */
+%include "arm/binop2addr.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_int.S b/runtime/interpreter/mterp/arm/op_rem_int.S
new file mode 100644
index 0000000..ff62573
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int.S
@@ -0,0 +1,33 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
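
ARM has no hardware remainder instruction, so the quotient from sdiv is folded back with mls, which computes r1 <- r0 - r2*r1, i.e. a - (a/b)*b; __aeabi_idivmod honours the same contract on cores without the IDIV extension. As plain C:

    #include <stdint.h>

    /* b == 0 has already been routed to common_errDivideByZero; the
     * INT32_MIN / -1 case, which sdiv defines but C leaves undefined,
     * is glossed over in this sketch. */
    static int32_t rem_int(int32_t a, int32_t b) {
        int32_t q = a / b;  /* sdiv r2, r0, r1 */
        return a - q * b;   /* mls  r1, r1, r2, r0 */
    }
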
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_2addr.S b/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
new file mode 100644
index 0000000..ba5751a
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
@@ -0,0 +1,32 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit16.S b/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
new file mode 100644
index 0000000..4edb187
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
@@ -0,0 +1,31 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit8.S b/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
new file mode 100644
index 0000000..3888361
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
@@ -0,0 +1,32 @@
+%default {}
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @cmp r1, #0 @ not needed: the movs above already set the flags
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_long.S b/runtime/interpreter/mterp/arm/op_rem_long.S
new file mode 100644
index 0000000..b2b1c24
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_long.S
@@ -0,0 +1,2 @@
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%include "arm/binopWide.S" {"instr":"bl __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_long_2addr.S b/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
new file mode 100644
index 0000000..f87d493
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
@@ -0,0 +1,2 @@
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+%include "arm/binopWide2addr.S" {"instr":"bl __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_return.S b/runtime/interpreter/mterp/arm/op_return.S
new file mode 100644
index 0000000..a4ffd04
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return.S
@@ -0,0 +1,12 @@
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA
+ mov r1, #0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_object.S b/runtime/interpreter/mterp/arm/op_return_object.S
new file mode 100644
index 0000000..c490730
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_object.S
@@ -0,0 +1 @@
+%include "arm/op_return.S"
diff --git a/runtime/interpreter/mterp/arm/op_return_void.S b/runtime/interpreter/mterp/arm/op_return_void.S
new file mode 100644
index 0000000..f6dfd99
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_void.S
@@ -0,0 +1,5 @@
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
new file mode 100644
index 0000000..7322940
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
@@ -0,0 +1,3 @@
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_wide.S b/runtime/interpreter/mterp/arm/op_return_wide.S
new file mode 100644
index 0000000..2881c87
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_return_wide.S
@@ -0,0 +1,10 @@
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int.S b/runtime/interpreter/mterp/arm/op_rsub_int.S
new file mode 100644
index 0000000..1508dd4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rsub_int.S
@@ -0,0 +1,2 @@
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "arm/binopLit16.S" {"instr":"rsb r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
new file mode 100644
index 0000000..2ee11e1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"rsb r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S
new file mode 100644
index 0000000..2b81f50
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget.S
@@ -0,0 +1,27 @@
+%default { "is_object":"0", "helper":"artGet32StaticFromCode" }
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern $helper
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl $helper
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if $is_object
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S
new file mode 100644
index 0000000..ebfb44c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetBooleanStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S
new file mode 100644
index 0000000..d76862e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_byte.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetByteStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S
new file mode 100644
index 0000000..b7fcfc2
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_char.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetCharStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S
new file mode 100644
index 0000000..8e7d075
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_object.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S
new file mode 100644
index 0000000..3e80f0d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_short.S
@@ -0,0 +1 @@
+%include "arm/op_sget.S" {"helper":"artGetShortStaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
new file mode 100644
index 0000000..97db05f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -0,0 +1,21 @@
+ /*
+ * SGET_WIDE handler wrapper.
+ *
+ */
+ /* sget-wide vAA, field@BBBB */
+
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGet64StaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r9, rINST, lsr #8 @ r9<- AA
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shl_int.S b/runtime/interpreter/mterp/arm/op_shl_int.S
new file mode 100644
index 0000000..7e4c768
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_2addr.S b/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
new file mode 100644
index 0000000..4286577
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
new file mode 100644
index 0000000..6a48bfc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_long.S b/runtime/interpreter/mterp/arm/op_shl_long.S
new file mode 100644
index 0000000..dc8a679
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_long.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
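
The handler builds a 64-bit left shift out of 32-bit halves: the conditional movpl covers shifts of 32 or more, and the orr folds in the bits that cross the word boundary. A C++ sketch of the same arithmetic (hypothetical helper, for illustration only):

    uint64_t ShlLong(uint64_t value, uint32_t shift) {
      shift &= 63;                                  // Dalvik masks to 6 bits
      uint32_t lo = static_cast<uint32_t>(value);
      uint32_t hi = static_cast<uint32_t>(value >> 32);
      if (shift >= 32) {                            // the movpl path (r2-32 >= 0)
        hi = lo << (shift - 32);
        lo = 0;
      } else if (shift != 0) {                      // C needs the zero guard;
        hi = (hi << shift) | (lo >> (32 - shift));  // ARM register shifts of 32
        lo <<= shift;                               // handle this case natively
      }
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

The shr and ushr handlers below mirror the same pattern with the word roles swapped.
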
diff --git a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
new file mode 100644
index 0000000..fd7668d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vA/vA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_int.S b/runtime/interpreter/mterp/arm/op_shr_int.S
new file mode 100644
index 0000000..6317605
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_2addr.S b/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
new file mode 100644
index 0000000..cc8632f
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
new file mode 100644
index 0000000..60fe5fc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_long.S b/runtime/interpreter/mterp/arm/op_shr_long.S
new file mode 100644
index 0000000..c0edf90
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_long.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
new file mode 100644
index 0000000..ffeaf9c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vA/vA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sparse_switch.S b/runtime/interpreter/mterp/arm/op_sparse_switch.S
new file mode 100644
index 0000000..9f7a42b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sparse_switch.S
@@ -0,0 +1 @@
+%include "arm/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S
new file mode 100644
index 0000000..7e0c1a6
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput.S
@@ -0,0 +1,20 @@
+%default { "helper":"artSet32StaticFromCode"}
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl $helper
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
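
The five one-line specializations that follow only swap the $helper name. The register setup (r0 = field index, r1 = value from vAA, r2 = referring method, r3 = self) implies a helper family along these lines (a hedged sketch, not the authoritative declarations):

    extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
                                          ArtMethod* referrer, Thread* self);
    // 0 means success; any other value sends the handler to MterpException
    // before rPC is advanced past the instruction.
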
diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S
new file mode 100644
index 0000000..e3bbf2b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_boolean.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S
new file mode 100644
index 0000000..e3bbf2b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_byte.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S
new file mode 100644
index 0000000..d8d65cb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_char.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_object.S b/runtime/interpreter/mterp/arm/op_sput_object.S
new file mode 100644
index 0000000..6d3a9a7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_object.S
@@ -0,0 +1,11 @@
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpSputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S
new file mode 100644
index 0000000..d8d65cb
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_short.S
@@ -0,0 +1 @@
+%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
new file mode 100644
index 0000000..adbcffa
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sput_wide.S
@@ -0,0 +1,19 @@
+ /*
+ * SPUT_WIDE handler wrapper.
+ *
+ */
+ /* sput-wide vAA, field@BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet64IndirectStaticFromMterp
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
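
Unlike the 32-bit case, the wide store passes a pointer: r2 holds &fp[AA] rather than the value itself, so the 64-bit operand never has to straddle the r2/r3 argument pair. A sketch of the implied convention, under the same assumptions as above:

    extern "C" int artSet64IndirectStaticFromMterp(uint32_t field_idx,
                                                   ArtMethod* referrer,
                                                   uint64_t* new_value,
                                                   Thread* self);
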
diff --git a/runtime/interpreter/mterp/arm/op_sub_double.S b/runtime/interpreter/mterp/arm/op_sub_double.S
new file mode 100644
index 0000000..69bcc67
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_double.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide.S" {"instr":"fsubd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_double_2addr.S b/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
new file mode 100644
index 0000000..2ea59fe
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinopWide2addr.S" {"instr":"fsubd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float.S b/runtime/interpreter/mterp/arm/op_sub_float.S
new file mode 100644
index 0000000..3f17a0d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_float.S
@@ -0,0 +1 @@
+%include "arm/fbinop.S" {"instr":"fsubs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float_2addr.S b/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
new file mode 100644
index 0000000..2f4aac4
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
@@ -0,0 +1 @@
+%include "arm/fbinop2addr.S" {"instr":"fsubs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int.S b/runtime/interpreter/mterp/arm/op_sub_int.S
new file mode 100644
index 0000000..efb9e10
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"sub r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int_2addr.S b/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
new file mode 100644
index 0000000..4d3036b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"sub r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long.S b/runtime/interpreter/mterp/arm/op_sub_long.S
new file mode 100644
index 0000000..6f1eb6e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"subs r0, r0, r2", "instr":"sbc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long_2addr.S b/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
new file mode 100644
index 0000000..8e9da05
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"subs r0, r0, r2", "instr":"sbc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_throw.S b/runtime/interpreter/mterp/arm/op_throw.S
new file mode 100644
index 0000000..be49ada
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_throw.S
@@ -0,0 +1,11 @@
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r1, r2 @ r1<- vAA (exception object)
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
+ b MterpException
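
In C++ terms the handler reduces to the following sketch (GetVReg, ThrowNullPointerException, and SetException stand in for the runtime's actual entry points):

    mirror::Object* exception = GetVReg(AA);  // r1<- vAA
    if (exception == nullptr) {
      ThrowNullPointerException();            // common_errNullObject
    } else {
      self->SetException(exception);          // thread->exception<- obj
    }
    // Either way, control leaves the fast path through MterpException.
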
diff --git a/runtime/interpreter/mterp/arm/op_unused_3e.S b/runtime/interpreter/mterp/arm/op_unused_3e.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_3e.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_3f.S b/runtime/interpreter/mterp/arm/op_unused_3f.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_3f.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_40.S b/runtime/interpreter/mterp/arm/op_unused_40.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_40.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_41.S b/runtime/interpreter/mterp/arm/op_unused_41.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_41.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_42.S b/runtime/interpreter/mterp/arm/op_unused_42.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_42.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_43.S b/runtime/interpreter/mterp/arm/op_unused_43.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_43.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_73.S b/runtime/interpreter/mterp/arm/op_unused_73.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_73.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_79.S b/runtime/interpreter/mterp/arm/op_unused_79.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_79.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_7a.S b/runtime/interpreter/mterp/arm/op_unused_7a.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_7a.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f3.S b/runtime/interpreter/mterp/arm/op_unused_f3.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f3.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f4.S b/runtime/interpreter/mterp/arm/op_unused_f4.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f4.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f5.S b/runtime/interpreter/mterp/arm/op_unused_f5.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f5.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f6.S b/runtime/interpreter/mterp/arm/op_unused_f6.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f6.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f7.S b/runtime/interpreter/mterp/arm/op_unused_f7.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f7.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f8.S b/runtime/interpreter/mterp/arm/op_unused_f8.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f8.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f9.S b/runtime/interpreter/mterp/arm/op_unused_f9.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_f9.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fa.S b/runtime/interpreter/mterp/arm/op_unused_fa.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fa.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fb.S b/runtime/interpreter/mterp/arm/op_unused_fb.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fb.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fc.S b/runtime/interpreter/mterp/arm/op_unused_fc.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fc.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fd.S b/runtime/interpreter/mterp/arm/op_unused_fd.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fd.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fe.S b/runtime/interpreter/mterp/arm/op_unused_fe.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_fe.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_ff.S b/runtime/interpreter/mterp/arm/op_unused_ff.S
new file mode 100644
index 0000000..10948dc
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_unused_ff.S
@@ -0,0 +1 @@
+%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int.S b/runtime/interpreter/mterp/arm/op_ushr_int.S
new file mode 100644
index 0000000..a74361b
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
new file mode 100644
index 0000000..f2d1d13
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
new file mode 100644
index 0000000..40a4435
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long.S b/runtime/interpreter/mterp/arm/op_ushr_long.S
new file mode 100644
index 0000000..f64c861
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_long.S
@@ -0,0 +1,27 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
new file mode 100644
index 0000000..dbab08d
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
@@ -0,0 +1,22 @@
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vA/vA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_xor_int.S b/runtime/interpreter/mterp/arm/op_xor_int.S
new file mode 100644
index 0000000..fd7a4b7
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int.S
@@ -0,0 +1 @@
+%include "arm/binop.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_2addr.S b/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
new file mode 100644
index 0000000..196a665
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
@@ -0,0 +1 @@
+%include "arm/binop2addr.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit16.S b/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
new file mode 100644
index 0000000..39f2a47
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
@@ -0,0 +1 @@
+%include "arm/binopLit16.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
new file mode 100644
index 0000000..46bb712
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
@@ -0,0 +1 @@
+%include "arm/binopLit8.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long.S b/runtime/interpreter/mterp/arm/op_xor_long.S
new file mode 100644
index 0000000..4f830d0
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_long.S
@@ -0,0 +1 @@
+%include "arm/binopWide.S" {"preinstr":"eor r0, r0, r2", "instr":"eor r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long_2addr.S b/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
new file mode 100644
index 0000000..5b5ed88
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
@@ -0,0 +1 @@
+%include "arm/binopWide2addr.S" {"preinstr":"eor r0, r0, r2", "instr":"eor r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/unop.S b/runtime/interpreter/mterp/arm/unop.S
new file mode 100644
index 0000000..56518b5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unop.S
@@ -0,0 +1,20 @@
+%default {"preinstr":""}
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ $preinstr @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
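
A concrete instantiation is a single line; neg-int, for example, can fill the $instr slot with "rsb r0, r0, #0". The template's data flow, sketched in C++ with hypothetical GetVReg/SetVReg helpers:

    uint32_t vB = GetVReg(shadow_frame, B);  // GET_VREG r0, r3
    uint32_t result = op(vB);                // the $instr slot, e.g. -vB
    SetVReg(shadow_frame, A, result);        // SET_VREG r0, r9
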
diff --git a/runtime/interpreter/mterp/arm/unopNarrower.S b/runtime/interpreter/mterp/arm/unopNarrower.S
new file mode 100644
index 0000000..a5fc027
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unopNarrower.S
@@ -0,0 +1,23 @@
+%default {"preinstr":""}
+ /*
+ * Generic 64-bit-to-32-bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for op_move.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S
new file mode 100644
index 0000000..7b8739c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unopWide.S
@@ -0,0 +1,21 @@
+%default {"preinstr":""}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S
new file mode 100644
index 0000000..657a395
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unopWider.S
@@ -0,0 +1,20 @@
+%default {"preinstr":""}
+ /*
+ * Generic 32-bit-to-64-bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ $preinstr @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unused.S b/runtime/interpreter/mterp/arm/unused.S
new file mode 100644
index 0000000..ffa00be
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/unused.S
@@ -0,0 +1,4 @@
+/*
+ * Bail to the reference interpreter, which throws for this unused opcode.
+ */
+ b MterpFallback
diff --git a/runtime/interpreter/mterp/arm/zcmp.S b/runtime/interpreter/mterp/arm/zcmp.S
new file mode 100644
index 0000000..6e9ef55
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/zcmp.S
@@ -0,0 +1,32 @@
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ mov${revcmp} r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ mov${revcmp} r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
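
The revcmp trick avoids a branch when selecting the displacement: r1 is preloaded with the fetched branch offset, then overwritten with the not-taken distance (2 code units) under the *reverse* condition. A C sketch of the selection (FetchSigned and CheckSuspend are hypothetical names):

    int32_t offset = FetchSigned(pc, 1);  // FETCH_S r1, 1 (16-bit code units)
    if (reverse_condition_holds) {        // mov${revcmp} r1, #2
      offset = 2;                         // fall through past if-cmp vAA, +BBBB
    }
    pc += offset * 2;                     // adds r1, r1, r1 converts to bytes
    if (offset < 0) {                     // backward branch: poll for suspend
      CheckSuspend();                     // bmi MterpCheckSuspendAndContinue
    }
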
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
new file mode 100644
index 0000000..436dcd2
--- /dev/null
+++ b/runtime/interpreter/mterp/config_arm
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARMv7-A targets.
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub arm/alt_stub.S
+
+# file header and basic definitions
+import arm/header.S
+
+# arch-specific entry point to interpreter
+import arm/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub arm/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start arm
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ # op op_nop FALLBACK
+ # op op_move FALLBACK
+ # op op_move_from16 FALLBACK
+ # op op_move_16 FALLBACK
+ # op op_move_wide FALLBACK
+ # op op_move_wide_from16 FALLBACK
+ # op op_move_wide_16 FALLBACK
+ # op op_move_object FALLBACK
+ # op op_move_object_from16 FALLBACK
+ # op op_move_object_16 FALLBACK
+ # op op_move_result FALLBACK
+ # op op_move_result_wide FALLBACK
+ # op op_move_result_object FALLBACK
+ # op op_move_exception FALLBACK
+ # op op_return_void FALLBACK
+ # op op_return FALLBACK
+ # op op_return_wide FALLBACK
+ # op op_return_object FALLBACK
+ # op op_const_4 FALLBACK
+ # op op_const_16 FALLBACK
+ # op op_const FALLBACK
+ # op op_const_high16 FALLBACK
+ # op op_const_wide_16 FALLBACK
+ # op op_const_wide_32 FALLBACK
+ # op op_const_wide FALLBACK
+ # op op_const_wide_high16 FALLBACK
+ # op op_const_string FALLBACK
+ # op op_const_string_jumbo FALLBACK
+ # op op_const_class FALLBACK
+ # op op_monitor_enter FALLBACK
+ # op op_monitor_exit FALLBACK
+ # op op_check_cast FALLBACK
+ # op op_instance_of FALLBACK
+ # op op_array_length FALLBACK
+ # op op_new_instance FALLBACK
+ # op op_new_array FALLBACK
+ # op op_filled_new_array FALLBACK
+ # op op_filled_new_array_range FALLBACK
+ # op op_fill_array_data FALLBACK
+ # op op_throw FALLBACK
+ # op op_goto FALLBACK
+ # op op_goto_16 FALLBACK
+ # op op_goto_32 FALLBACK
+ # op op_packed_switch FALLBACK
+ # op op_sparse_switch FALLBACK
+ # op op_cmpl_float FALLBACK
+ # op op_cmpg_float FALLBACK
+ # op op_cmpl_double FALLBACK
+ # op op_cmpg_double FALLBACK
+ # op op_cmp_long FALLBACK
+ # op op_if_eq FALLBACK
+ # op op_if_ne FALLBACK
+ # op op_if_lt FALLBACK
+ # op op_if_ge FALLBACK
+ # op op_if_gt FALLBACK
+ # op op_if_le FALLBACK
+ # op op_if_eqz FALLBACK
+ # op op_if_nez FALLBACK
+ # op op_if_ltz FALLBACK
+ # op op_if_gez FALLBACK
+ # op op_if_gtz FALLBACK
+ # op op_if_lez FALLBACK
+ # op op_unused_3e FALLBACK
+ # op op_unused_3f FALLBACK
+ # op op_unused_40 FALLBACK
+ # op op_unused_41 FALLBACK
+ # op op_unused_42 FALLBACK
+ # op op_unused_43 FALLBACK
+ # op op_aget FALLBACK
+ # op op_aget_wide FALLBACK
+ # op op_aget_object FALLBACK
+ # op op_aget_boolean FALLBACK
+ # op op_aget_byte FALLBACK
+ # op op_aget_char FALLBACK
+ # op op_aget_short FALLBACK
+ # op op_aput FALLBACK
+ # op op_aput_wide FALLBACK
+ # op op_aput_object FALLBACK
+ # op op_aput_boolean FALLBACK
+ # op op_aput_byte FALLBACK
+ # op op_aput_char FALLBACK
+ # op op_aput_short FALLBACK
+ # op op_iget FALLBACK
+ # op op_iget_wide FALLBACK
+ # op op_iget_object FALLBACK
+ # op op_iget_boolean FALLBACK
+ # op op_iget_byte FALLBACK
+ # op op_iget_char FALLBACK
+ # op op_iget_short FALLBACK
+ # op op_iput FALLBACK
+ # op op_iput_wide FALLBACK
+ # op op_iput_object FALLBACK
+ # op op_iput_boolean FALLBACK
+ # op op_iput_byte FALLBACK
+ # op op_iput_char FALLBACK
+ # op op_iput_short FALLBACK
+ # op op_sget FALLBACK
+ # op op_sget_wide FALLBACK
+ # op op_sget_object FALLBACK
+ # op op_sget_boolean FALLBACK
+ # op op_sget_byte FALLBACK
+ # op op_sget_char FALLBACK
+ # op op_sget_short FALLBACK
+ # op op_sput FALLBACK
+ # op op_sput_wide FALLBACK
+ # op op_sput_object FALLBACK
+ # op op_sput_boolean FALLBACK
+ # op op_sput_byte FALLBACK
+ # op op_sput_char FALLBACK
+ # op op_sput_short FALLBACK
+ # op op_invoke_virtual FALLBACK
+ # op op_invoke_super FALLBACK
+ # op op_invoke_direct FALLBACK
+ # op op_invoke_static FALLBACK
+ # op op_invoke_interface FALLBACK
+ # op op_return_void_no_barrier FALLBACK
+ # op op_invoke_virtual_range FALLBACK
+ # op op_invoke_super_range FALLBACK
+ # op op_invoke_direct_range FALLBACK
+ # op op_invoke_static_range FALLBACK
+ # op op_invoke_interface_range FALLBACK
+ # op op_unused_79 FALLBACK
+ # op op_unused_7a FALLBACK
+ # op op_neg_int FALLBACK
+ # op op_not_int FALLBACK
+ # op op_neg_long FALLBACK
+ # op op_not_long FALLBACK
+ # op op_neg_float FALLBACK
+ # op op_neg_double FALLBACK
+ # op op_int_to_long FALLBACK
+ # op op_int_to_float FALLBACK
+ # op op_int_to_double FALLBACK
+ # op op_long_to_int FALLBACK
+ # op op_long_to_float FALLBACK
+ # op op_long_to_double FALLBACK
+ # op op_float_to_int FALLBACK
+ # op op_float_to_long FALLBACK
+ # op op_float_to_double FALLBACK
+ # op op_double_to_int FALLBACK
+ # op op_double_to_long FALLBACK
+ # op op_double_to_float FALLBACK
+ # op op_int_to_byte FALLBACK
+ # op op_int_to_char FALLBACK
+ # op op_int_to_short FALLBACK
+ # op op_add_int FALLBACK
+ # op op_sub_int FALLBACK
+ # op op_mul_int FALLBACK
+ # op op_div_int FALLBACK
+ # op op_rem_int FALLBACK
+ # op op_and_int FALLBACK
+ # op op_or_int FALLBACK
+ # op op_xor_int FALLBACK
+ # op op_shl_int FALLBACK
+ # op op_shr_int FALLBACK
+ # op op_ushr_int FALLBACK
+ # op op_add_long FALLBACK
+ # op op_sub_long FALLBACK
+ # op op_mul_long FALLBACK
+ # op op_div_long FALLBACK
+ # op op_rem_long FALLBACK
+ # op op_and_long FALLBACK
+ # op op_or_long FALLBACK
+ # op op_xor_long FALLBACK
+ # op op_shl_long FALLBACK
+ # op op_shr_long FALLBACK
+ # op op_ushr_long FALLBACK
+ # op op_add_float FALLBACK
+ # op op_sub_float FALLBACK
+ # op op_mul_float FALLBACK
+ # op op_div_float FALLBACK
+ # op op_rem_float FALLBACK
+ # op op_add_double FALLBACK
+ # op op_sub_double FALLBACK
+ # op op_mul_double FALLBACK
+ # op op_div_double FALLBACK
+ # op op_rem_double FALLBACK
+ # op op_add_int_2addr FALLBACK
+ # op op_sub_int_2addr FALLBACK
+ # op op_mul_int_2addr FALLBACK
+ # op op_div_int_2addr FALLBACK
+ # op op_rem_int_2addr FALLBACK
+ # op op_and_int_2addr FALLBACK
+ # op op_or_int_2addr FALLBACK
+ # op op_xor_int_2addr FALLBACK
+ # op op_shl_int_2addr FALLBACK
+ # op op_shr_int_2addr FALLBACK
+ # op op_ushr_int_2addr FALLBACK
+ # op op_add_long_2addr FALLBACK
+ # op op_sub_long_2addr FALLBACK
+ # op op_mul_long_2addr FALLBACK
+ # op op_div_long_2addr FALLBACK
+ # op op_rem_long_2addr FALLBACK
+ # op op_and_long_2addr FALLBACK
+ # op op_or_long_2addr FALLBACK
+ # op op_xor_long_2addr FALLBACK
+ # op op_shl_long_2addr FALLBACK
+ # op op_shr_long_2addr FALLBACK
+ # op op_ushr_long_2addr FALLBACK
+ # op op_add_float_2addr FALLBACK
+ # op op_sub_float_2addr FALLBACK
+ # op op_mul_float_2addr FALLBACK
+ # op op_div_float_2addr FALLBACK
+ # op op_rem_float_2addr FALLBACK
+ # op op_add_double_2addr FALLBACK
+ # op op_sub_double_2addr FALLBACK
+ # op op_mul_double_2addr FALLBACK
+ # op op_div_double_2addr FALLBACK
+ # op op_rem_double_2addr FALLBACK
+ # op op_add_int_lit16 FALLBACK
+ # op op_rsub_int FALLBACK
+ # op op_mul_int_lit16 FALLBACK
+ # op op_div_int_lit16 FALLBACK
+ # op op_rem_int_lit16 FALLBACK
+ # op op_and_int_lit16 FALLBACK
+ # op op_or_int_lit16 FALLBACK
+ # op op_xor_int_lit16 FALLBACK
+ # op op_add_int_lit8 FALLBACK
+ # op op_rsub_int_lit8 FALLBACK
+ # op op_mul_int_lit8 FALLBACK
+ # op op_div_int_lit8 FALLBACK
+ # op op_rem_int_lit8 FALLBACK
+ # op op_and_int_lit8 FALLBACK
+ # op op_or_int_lit8 FALLBACK
+ # op op_xor_int_lit8 FALLBACK
+ # op op_shl_int_lit8 FALLBACK
+ # op op_shr_int_lit8 FALLBACK
+ # op op_ushr_int_lit8 FALLBACK
+ # op op_iget_quick FALLBACK
+ # op op_iget_wide_quick FALLBACK
+ # op op_iget_object_quick FALLBACK
+ # op op_iput_quick FALLBACK
+ # op op_iput_wide_quick FALLBACK
+ # op op_iput_object_quick FALLBACK
+ # op op_invoke_virtual_quick FALLBACK
+ # op op_invoke_virtual_range_quick FALLBACK
+ # op op_iput_boolean_quick FALLBACK
+ # op op_iput_byte_quick FALLBACK
+ # op op_iput_char_quick FALLBACK
+ # op op_iput_short_quick FALLBACK
+ # op op_iget_boolean_quick FALLBACK
+ # op op_iget_byte_quick FALLBACK
+ # op op_iget_char_quick FALLBACK
+ # op op_iget_short_quick FALLBACK
+ op op_invoke_lambda FALLBACK
+ # op op_unused_f4 FALLBACK
+ op op_capture_variable FALLBACK
+ op op_create_lambda FALLBACK
+ op op_liberate_variable FALLBACK
+ op op_box_lambda FALLBACK
+ op op_unbox_lambda FALLBACK
+ # op op_unused_fa FALLBACK
+ # op op_unused_fb FALLBACK
+ # op op_unused_fc FALLBACK
+ # op op_unused_fd FALLBACK
+ # op op_unused_fe FALLBACK
+ # op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import arm/footer.S
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
new file mode 100644
index 0000000..ef3c721
--- /dev/null
+++ b/runtime/interpreter/mterp/config_arm64
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARM64
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub arm64/alt_stub.S
+
+# file header and basic definitions
+import arm64/header.S
+
+# arch-specific entry point to interpreter
+import arm64/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub arm64/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start arm64
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import arm64/footer.S
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
new file mode 100644
index 0000000..d1221f7
--- /dev/null
+++ b/runtime/interpreter/mterp/config_mips
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for MIPS32
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub mips/alt_stub.S
+
+# file header and basic definitions
+import mips/header.S
+
+# arch-specific entry point to interpreter
+import mips/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub mips/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start mips
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import mips/footer.S
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
new file mode 100644
index 0000000..f804ce5
--- /dev/null
+++ b/runtime/interpreter/mterp/config_mips64
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for MIPS64
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub mips64/alt_stub.S
+
+# file header and basic definitions
+import mips64/header.S
+
+# arch-specific entry point to interpreter
+import mips64/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub mips64/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start mips64
+ # (override example:) op op_sub_float_2addr arm-vfp
+ # (fallback example:) op op_sub_float_2addr FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import mips64/footer.S
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
new file mode 100644
index 0000000..277817d
--- /dev/null
+++ b/runtime/interpreter/mterp/config_x86
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for X86
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub x86/alt_stub.S
+
+# file header and basic definitions
+import x86/header.S
+
+# arch-specific entry point to interpreter
+import x86/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub x86/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start x86
+ # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
+ # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import x86/footer.S
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
new file mode 100644
index 0000000..a002dc2
--- /dev/null
+++ b/runtime/interpreter/mterp/config_x86_64
@@ -0,0 +1,298 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for X86_64
+#
+
+handler-style computed-goto
+handler-size 128
+
+# source for alternate entry stub
+asm-alt-stub x86_64/alt_stub.S
+
+# file header and basic definitions
+import x86_64/header.S
+
+# arch-specific entry point to interpreter
+import x86_64/entry.S
+
+# Stub to switch to alternate interpreter
+fallback-stub x86_64/fallback.S
+
+# opcode list; argument to op-start is default directory
+op-start x86_64
+ # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
+ # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
+
+ op op_nop FALLBACK
+ op op_move FALLBACK
+ op op_move_from16 FALLBACK
+ op op_move_16 FALLBACK
+ op op_move_wide FALLBACK
+ op op_move_wide_from16 FALLBACK
+ op op_move_wide_16 FALLBACK
+ op op_move_object FALLBACK
+ op op_move_object_from16 FALLBACK
+ op op_move_object_16 FALLBACK
+ op op_move_result FALLBACK
+ op op_move_result_wide FALLBACK
+ op op_move_result_object FALLBACK
+ op op_move_exception FALLBACK
+ op op_return_void FALLBACK
+ op op_return FALLBACK
+ op op_return_wide FALLBACK
+ op op_return_object FALLBACK
+ op op_const_4 FALLBACK
+ op op_const_16 FALLBACK
+ op op_const FALLBACK
+ op op_const_high16 FALLBACK
+ op op_const_wide_16 FALLBACK
+ op op_const_wide_32 FALLBACK
+ op op_const_wide FALLBACK
+ op op_const_wide_high16 FALLBACK
+ op op_const_string FALLBACK
+ op op_const_string_jumbo FALLBACK
+ op op_const_class FALLBACK
+ op op_monitor_enter FALLBACK
+ op op_monitor_exit FALLBACK
+ op op_check_cast FALLBACK
+ op op_instance_of FALLBACK
+ op op_array_length FALLBACK
+ op op_new_instance FALLBACK
+ op op_new_array FALLBACK
+ op op_filled_new_array FALLBACK
+ op op_filled_new_array_range FALLBACK
+ op op_fill_array_data FALLBACK
+ op op_throw FALLBACK
+ op op_goto FALLBACK
+ op op_goto_16 FALLBACK
+ op op_goto_32 FALLBACK
+ op op_packed_switch FALLBACK
+ op op_sparse_switch FALLBACK
+ op op_cmpl_float FALLBACK
+ op op_cmpg_float FALLBACK
+ op op_cmpl_double FALLBACK
+ op op_cmpg_double FALLBACK
+ op op_cmp_long FALLBACK
+ op op_if_eq FALLBACK
+ op op_if_ne FALLBACK
+ op op_if_lt FALLBACK
+ op op_if_ge FALLBACK
+ op op_if_gt FALLBACK
+ op op_if_le FALLBACK
+ op op_if_eqz FALLBACK
+ op op_if_nez FALLBACK
+ op op_if_ltz FALLBACK
+ op op_if_gez FALLBACK
+ op op_if_gtz FALLBACK
+ op op_if_lez FALLBACK
+ op op_unused_3e FALLBACK
+ op op_unused_3f FALLBACK
+ op op_unused_40 FALLBACK
+ op op_unused_41 FALLBACK
+ op op_unused_42 FALLBACK
+ op op_unused_43 FALLBACK
+ op op_aget FALLBACK
+ op op_aget_wide FALLBACK
+ op op_aget_object FALLBACK
+ op op_aget_boolean FALLBACK
+ op op_aget_byte FALLBACK
+ op op_aget_char FALLBACK
+ op op_aget_short FALLBACK
+ op op_aput FALLBACK
+ op op_aput_wide FALLBACK
+ op op_aput_object FALLBACK
+ op op_aput_boolean FALLBACK
+ op op_aput_byte FALLBACK
+ op op_aput_char FALLBACK
+ op op_aput_short FALLBACK
+ op op_iget FALLBACK
+ op op_iget_wide FALLBACK
+ op op_iget_object FALLBACK
+ op op_iget_boolean FALLBACK
+ op op_iget_byte FALLBACK
+ op op_iget_char FALLBACK
+ op op_iget_short FALLBACK
+ op op_iput FALLBACK
+ op op_iput_wide FALLBACK
+ op op_iput_object FALLBACK
+ op op_iput_boolean FALLBACK
+ op op_iput_byte FALLBACK
+ op op_iput_char FALLBACK
+ op op_iput_short FALLBACK
+ op op_sget FALLBACK
+ op op_sget_wide FALLBACK
+ op op_sget_object FALLBACK
+ op op_sget_boolean FALLBACK
+ op op_sget_byte FALLBACK
+ op op_sget_char FALLBACK
+ op op_sget_short FALLBACK
+ op op_sput FALLBACK
+ op op_sput_wide FALLBACK
+ op op_sput_object FALLBACK
+ op op_sput_boolean FALLBACK
+ op op_sput_byte FALLBACK
+ op op_sput_char FALLBACK
+ op op_sput_short FALLBACK
+ op op_invoke_virtual FALLBACK
+ op op_invoke_super FALLBACK
+ op op_invoke_direct FALLBACK
+ op op_invoke_static FALLBACK
+ op op_invoke_interface FALLBACK
+ op op_return_void_no_barrier FALLBACK
+ op op_invoke_virtual_range FALLBACK
+ op op_invoke_super_range FALLBACK
+ op op_invoke_direct_range FALLBACK
+ op op_invoke_static_range FALLBACK
+ op op_invoke_interface_range FALLBACK
+ op op_unused_79 FALLBACK
+ op op_unused_7a FALLBACK
+ op op_neg_int FALLBACK
+ op op_not_int FALLBACK
+ op op_neg_long FALLBACK
+ op op_not_long FALLBACK
+ op op_neg_float FALLBACK
+ op op_neg_double FALLBACK
+ op op_int_to_long FALLBACK
+ op op_int_to_float FALLBACK
+ op op_int_to_double FALLBACK
+ op op_long_to_int FALLBACK
+ op op_long_to_float FALLBACK
+ op op_long_to_double FALLBACK
+ op op_float_to_int FALLBACK
+ op op_float_to_long FALLBACK
+ op op_float_to_double FALLBACK
+ op op_double_to_int FALLBACK
+ op op_double_to_long FALLBACK
+ op op_double_to_float FALLBACK
+ op op_int_to_byte FALLBACK
+ op op_int_to_char FALLBACK
+ op op_int_to_short FALLBACK
+ op op_add_int FALLBACK
+ op op_sub_int FALLBACK
+ op op_mul_int FALLBACK
+ op op_div_int FALLBACK
+ op op_rem_int FALLBACK
+ op op_and_int FALLBACK
+ op op_or_int FALLBACK
+ op op_xor_int FALLBACK
+ op op_shl_int FALLBACK
+ op op_shr_int FALLBACK
+ op op_ushr_int FALLBACK
+ op op_add_long FALLBACK
+ op op_sub_long FALLBACK
+ op op_mul_long FALLBACK
+ op op_div_long FALLBACK
+ op op_rem_long FALLBACK
+ op op_and_long FALLBACK
+ op op_or_long FALLBACK
+ op op_xor_long FALLBACK
+ op op_shl_long FALLBACK
+ op op_shr_long FALLBACK
+ op op_ushr_long FALLBACK
+ op op_add_float FALLBACK
+ op op_sub_float FALLBACK
+ op op_mul_float FALLBACK
+ op op_div_float FALLBACK
+ op op_rem_float FALLBACK
+ op op_add_double FALLBACK
+ op op_sub_double FALLBACK
+ op op_mul_double FALLBACK
+ op op_div_double FALLBACK
+ op op_rem_double FALLBACK
+ op op_add_int_2addr FALLBACK
+ op op_sub_int_2addr FALLBACK
+ op op_mul_int_2addr FALLBACK
+ op op_div_int_2addr FALLBACK
+ op op_rem_int_2addr FALLBACK
+ op op_and_int_2addr FALLBACK
+ op op_or_int_2addr FALLBACK
+ op op_xor_int_2addr FALLBACK
+ op op_shl_int_2addr FALLBACK
+ op op_shr_int_2addr FALLBACK
+ op op_ushr_int_2addr FALLBACK
+ op op_add_long_2addr FALLBACK
+ op op_sub_long_2addr FALLBACK
+ op op_mul_long_2addr FALLBACK
+ op op_div_long_2addr FALLBACK
+ op op_rem_long_2addr FALLBACK
+ op op_and_long_2addr FALLBACK
+ op op_or_long_2addr FALLBACK
+ op op_xor_long_2addr FALLBACK
+ op op_shl_long_2addr FALLBACK
+ op op_shr_long_2addr FALLBACK
+ op op_ushr_long_2addr FALLBACK
+ op op_add_float_2addr FALLBACK
+ op op_sub_float_2addr FALLBACK
+ op op_mul_float_2addr FALLBACK
+ op op_div_float_2addr FALLBACK
+ op op_rem_float_2addr FALLBACK
+ op op_add_double_2addr FALLBACK
+ op op_sub_double_2addr FALLBACK
+ op op_mul_double_2addr FALLBACK
+ op op_div_double_2addr FALLBACK
+ op op_rem_double_2addr FALLBACK
+ op op_add_int_lit16 FALLBACK
+ op op_rsub_int FALLBACK
+ op op_mul_int_lit16 FALLBACK
+ op op_div_int_lit16 FALLBACK
+ op op_rem_int_lit16 FALLBACK
+ op op_and_int_lit16 FALLBACK
+ op op_or_int_lit16 FALLBACK
+ op op_xor_int_lit16 FALLBACK
+ op op_add_int_lit8 FALLBACK
+ op op_rsub_int_lit8 FALLBACK
+ op op_mul_int_lit8 FALLBACK
+ op op_div_int_lit8 FALLBACK
+ op op_rem_int_lit8 FALLBACK
+ op op_and_int_lit8 FALLBACK
+ op op_or_int_lit8 FALLBACK
+ op op_xor_int_lit8 FALLBACK
+ op op_shl_int_lit8 FALLBACK
+ op op_shr_int_lit8 FALLBACK
+ op op_ushr_int_lit8 FALLBACK
+ op op_iget_quick FALLBACK
+ op op_iget_wide_quick FALLBACK
+ op op_iget_object_quick FALLBACK
+ op op_iput_quick FALLBACK
+ op op_iput_wide_quick FALLBACK
+ op op_iput_object_quick FALLBACK
+ op op_invoke_virtual_quick FALLBACK
+ op op_invoke_virtual_range_quick FALLBACK
+ op op_iput_boolean_quick FALLBACK
+ op op_iput_byte_quick FALLBACK
+ op op_iput_char_quick FALLBACK
+ op op_iput_short_quick FALLBACK
+ op op_iget_boolean_quick FALLBACK
+ op op_iget_byte_quick FALLBACK
+ op op_iget_char_quick FALLBACK
+ op op_iget_short_quick FALLBACK
+ op op_unused_f3 FALLBACK
+ op op_unused_f4 FALLBACK
+ op op_unused_f5 FALLBACK
+ op op_unused_f6 FALLBACK
+ op op_unused_f7 FALLBACK
+ op op_unused_f8 FALLBACK
+ op op_unused_f9 FALLBACK
+ op op_unused_fa FALLBACK
+ op op_unused_fb FALLBACK
+ op op_unused_fc FALLBACK
+ op op_unused_fd FALLBACK
+ op op_unused_fe FALLBACK
+ op op_unused_ff FALLBACK
+op-end
+
+# common subroutines for asm
+import x86_64/footer.S
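
The mips64, x86, and x86_64 configs above are identical apart from the architecture directory they name: every opcode falls back to the C implementation. Each config must list all 256 opcodes defined in dex_instruction_list.h, or the generator aborts with "bad opcode count". A sketch of a helper that emits such an all-FALLBACK skeleton for a new architecture (the helper itself is hypothetical; the V(...) regex is the one getOpcodeList() in gen_mterp.py uses):

    import re

    # Hypothetical helper: write the all-FALLBACK op list for a new config.
    def emit_fallback_oplist(defs_path, out):
        opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
        count = 0
        for line in open(defs_path):
            match = opcode_re.match(line)
            if match:
                out.write("    op op_%s FALLBACK\n" % match.group(2).lower())
                count += 1
        assert count == 256, "dex_instruction_list.h must define 256 opcodes"
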
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
new file mode 100755
index 0000000..f56d8bd
--- /dev/null
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -0,0 +1,602 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Using instructions from an architecture-specific config file, generate the
+# assembly source file for the ART mterp interpreter.
+#
+
+import sys, string, re, time
+from string import Template
+
+interp_defs_file = "../../dex_instruction_list.h" # need opcode list
+kNumPackedOpcodes = 256
+
+splitops = False
+verbose = False
+handler_size_bits = -1000
+handler_size_bytes = -1000
+in_op_start = 0 # 0=not started, 1=started, 2=ended
+in_alt_op_start = 0 # 0=not started, 1=started, 2=ended
+default_op_dir = None
+default_alt_stub = None
+opcode_locations = {}
+alt_opcode_locations = {}
+asm_stub_text = []
+fallback_stub_text = []
+label_prefix = ".L" # use ".L" to hide labels from gdb
+alt_label_prefix = ".L_ALT" # use ".L" to hide labels from gdb
+style = None # interpreter style
+generate_alt_table = False
+
+# Exception class.
+class DataParseError(SyntaxError):
+ "Failure when parsing data file"
+
+#
+# Set any omnipresent substitution values.
+#
+def getGlobalSubDict():
+ return { "handler_size_bits":handler_size_bits,
+ "handler_size_bytes":handler_size_bytes }
+
+#
+# Parse arch config file --
+# Set interpreter style.
+#
+def setHandlerStyle(tokens):
+ global style
+ if len(tokens) != 2:
+ raise DataParseError("handler-style requires one argument")
+ style = tokens[1]
+ if style != "computed-goto":
+ raise DataParseError("handler-style (%s) invalid" % style)
+
+#
+# Parse arch config file --
+# Set handler_size_bytes to the value of tokens[1], and handler_size_bits to
+# log2(handler_size_bytes). Throws an exception if "bytes" is not 0 or
+# a power of two.
+#
+def setHandlerSize(tokens):
+ global handler_size_bits, handler_size_bytes
+ if style != "computed-goto":
+ print "Warning: handler-size valid only for computed-goto interpreters"
+ if len(tokens) != 2:
+ raise DataParseError("handler-size requires one argument")
+ if handler_size_bits != -1000:
+ raise DataParseError("handler-size may only be set once")
+
+ # compute log2(n), and make sure n is 0 or a power of 2
+ handler_size_bytes = bytes = int(tokens[1])
+ bits = -1
+ while bytes > 0:
+ bytes //= 2 # halve with truncating division
+ bits += 1
+
+ if handler_size_bytes == 0 or handler_size_bytes != (1 << bits):
+        raise DataParseError("handler-size (%d) must be power of 2" \
+                             % handler_size_bytes)
+ handler_size_bits = bits
+
+#
+# Parse arch config file --
+# Copy a file in to asm output file.
+#
+def importFile(tokens):
+ if len(tokens) != 2:
+ raise DataParseError("import requires one argument")
+ source = tokens[1]
+ if source.endswith(".S"):
+ appendSourceFile(tokens[1], getGlobalSubDict(), asm_fp, None)
+ else:
+        raise DataParseError("don't know how to import %s (expecting .S)"
+                             % source)
+
+#
+# Parse arch config file --
+# Copy a file in to the C or asm output file.
+#
+def setAsmStub(tokens):
+ global asm_stub_text
+    if len(tokens) != 2:
+        raise DataParseError("asm-stub requires one argument")
+    try:
+        stub_fp = open(tokens[1])
+        asm_stub_text = stub_fp.readlines()
+        stub_fp.close()
+    except IOError, err:
+        raise DataParseError("unable to load asm-stub: %s" % str(err))
+
+#
+# Parse arch config file --
+# Copy a file in to the C or asm output file.
+#
+def setFallbackStub(tokens):
+ global fallback_stub_text
+    if len(tokens) != 2:
+        raise DataParseError("fallback-stub requires one argument")
+    try:
+        stub_fp = open(tokens[1])
+        fallback_stub_text = stub_fp.readlines()
+        stub_fp.close()
+    except IOError, err:
+        raise DataParseError("unable to load fallback-stub: %s" % str(err))
+
+#
+# Parse arch config file --
+# Record location of default alt stub
+#
+def setAsmAltStub(tokens):
+ global default_alt_stub, generate_alt_table
+ if len(tokens) != 2:
+        raise DataParseError("asm-alt-stub requires one argument")
+ default_alt_stub = tokens[1]
+ generate_alt_table = True
+
+#
+# Parse arch config file --
+# Start of opcode list.
+#
+def opStart(tokens):
+ global in_op_start
+ global default_op_dir
+ if len(tokens) != 2:
+ raise DataParseError("opStart takes a directory name argument")
+ if in_op_start != 0:
+ raise DataParseError("opStart can only be specified once")
+ default_op_dir = tokens[1]
+ in_op_start = 1
+
+#
+# Parse arch config file --
+# Set location of a single alt opcode's source file.
+#
+def altEntry(tokens):
+ global generate_alt_table
+ if len(tokens) != 3:
+ raise DataParseError("alt requires exactly two arguments")
+ if in_op_start != 1:
+ raise DataParseError("alt statements must be between opStart/opEnd")
+ try:
+ index = opcodes.index(tokens[1])
+ except ValueError:
+ raise DataParseError("unknown opcode %s" % tokens[1])
+ if alt_opcode_locations.has_key(tokens[1]):
+ print "Note: alt overrides earlier %s (%s -> %s)" \
+ % (tokens[1], alt_opcode_locations[tokens[1]], tokens[2])
+ alt_opcode_locations[tokens[1]] = tokens[2]
+ generate_alt_table = True
+
+#
+# Parse arch config file --
+# Set location of a single opcode's source file.
+#
+def opEntry(tokens):
+ #global opcode_locations
+ if len(tokens) != 3:
+ raise DataParseError("op requires exactly two arguments")
+ if in_op_start != 1:
+ raise DataParseError("op statements must be between opStart/opEnd")
+ try:
+ index = opcodes.index(tokens[1])
+ except ValueError:
+ raise DataParseError("unknown opcode %s" % tokens[1])
+ if opcode_locations.has_key(tokens[1]):
+ print "Note: op overrides earlier %s (%s -> %s)" \
+ % (tokens[1], opcode_locations[tokens[1]], tokens[2])
+ opcode_locations[tokens[1]] = tokens[2]
+
+#
+# Parse arch config file --
+# End of opcode list; emit instruction blocks.
+#
+def opEnd(tokens):
+ global in_op_start
+ if len(tokens) != 1:
+ raise DataParseError("opEnd takes no arguments")
+ if in_op_start != 1:
+ raise DataParseError("opEnd must follow opStart, and only appear once")
+ in_op_start = 2
+
+ loadAndEmitOpcodes()
+ if splitops == False:
+ if generate_alt_table:
+ loadAndEmitAltOpcodes()
+
+def genaltop(tokens):
+ if in_op_start != 2:
+ raise DataParseError("alt-op can be specified only after op-end")
+ if len(tokens) != 1:
+        raise DataParseError("alt-ops takes no arguments")
+ if generate_alt_table:
+ loadAndEmitAltOpcodes()
+
+#
+# Extract an ordered list of instructions from the VM sources. We use the
+# "goto table" definition macro, which has exactly kNumPackedOpcodes
+# entries.
+#
+def getOpcodeList():
+ opcodes = []
+ opcode_fp = open(interp_defs_file)
+ opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
+ for line in opcode_fp:
+ match = opcode_re.match(line)
+ if not match:
+ continue
+ opcodes.append("op_" + match.group(2).lower())
+ opcode_fp.close()
+
+ if len(opcodes) != kNumPackedOpcodes:
+        print "ERROR: found %d opcodes in %s (expected %d)" \
+              % (len(opcodes), interp_defs_file, kNumPackedOpcodes)
+ raise SyntaxError, "bad opcode count"
+ return opcodes
+
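+# For reference, each dex_instruction_list.h entry matched by getOpcodeList()
+# above looks schematically like V(0x00, NOP, "nop", k10x, ...); group(2)
+# ("NOP") is lowercased and prefixed with "op_" to yield "op_nop".
+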
+def emitAlign():
+ if style == "computed-goto":
+ asm_fp.write(" .balign %d\n" % handler_size_bytes)
+
+#
+# Load and emit opcodes for all kNumPackedOpcodes instructions.
+#
+def loadAndEmitOpcodes():
+ sister_list = []
+ assert len(opcodes) == kNumPackedOpcodes
+ need_dummy_start = False
+ start_label = "artMterpAsmInstructionStart"
+ end_label = "artMterpAsmInstructionEnd"
+
+ # point MterpAsmInstructionStart at the first handler or stub
+ asm_fp.write("\n .global %s\n" % start_label)
+ asm_fp.write(" .type %s, %%function\n" % start_label)
+ asm_fp.write("%s = " % start_label + label_prefix + "_op_nop\n")
+ asm_fp.write(" .text\n\n")
+
+ for i in xrange(kNumPackedOpcodes):
+ op = opcodes[i]
+
+ if opcode_locations.has_key(op):
+ location = opcode_locations[op]
+ else:
+ location = default_op_dir
+
+ if location == "FALLBACK":
+ emitFallback(i)
+ else:
+ loadAndEmitAsm(location, i, sister_list)
+
+ # For a 100% C implementation, there are no asm handlers or stubs. We
+ # need to have the MterpAsmInstructionStart label point at op_nop, and it's
+    # too annoying to try to slide it in after the alignment pseudo-op, so
+ # we take the low road and just emit a dummy op_nop here.
+ if need_dummy_start:
+ emitAlign()
+ asm_fp.write(label_prefix + "_op_nop: /* dummy */\n");
+
+ emitAlign()
+ asm_fp.write(" .size %s, .-%s\n" % (start_label, start_label))
+ asm_fp.write(" .global %s\n" % end_label)
+ asm_fp.write("%s:\n" % end_label)
+
+ if style == "computed-goto":
+ emitSectionComment("Sister implementations", asm_fp)
+ asm_fp.write(" .global artMterpAsmSisterStart\n")
+ asm_fp.write(" .type artMterpAsmSisterStart, %function\n")
+ asm_fp.write(" .text\n")
+ asm_fp.write(" .balign 4\n")
+ asm_fp.write("artMterpAsmSisterStart:\n")
+ asm_fp.writelines(sister_list)
+ asm_fp.write("\n .size artMterpAsmSisterStart, .-artMterpAsmSisterStart\n")
+ asm_fp.write(" .global artMterpAsmSisterEnd\n")
+ asm_fp.write("artMterpAsmSisterEnd:\n\n")
+
+#
+# Load an alternate entry stub
+#
+def loadAndEmitAltStub(source, opindex):
+ op = opcodes[opindex]
+ if verbose:
+ print " alt emit %s --> stub" % source
+ dict = getGlobalSubDict()
+ dict.update({ "opcode":op, "opnum":opindex })
+
+ emitAsmHeader(asm_fp, dict, alt_label_prefix)
+ appendSourceFile(source, dict, asm_fp, None)
+
+#
+# Load and emit alternate opcodes for all kNumPackedOpcodes instructions.
+#
+def loadAndEmitAltOpcodes():
+ assert len(opcodes) == kNumPackedOpcodes
+ start_label = "artMterpAsmAltInstructionStart"
+ end_label = "artMterpAsmAltInstructionEnd"
+
+ # point MterpAsmInstructionStart at the first handler or stub
+ asm_fp.write("\n .global %s\n" % start_label)
+ asm_fp.write(" .type %s, %%function\n" % start_label)
+ asm_fp.write(" .text\n\n")
+ asm_fp.write("%s = " % start_label + label_prefix + "_ALT_op_nop\n")
+
+ for i in xrange(kNumPackedOpcodes):
+ op = opcodes[i]
+ if alt_opcode_locations.has_key(op):
+ source = "%s/alt_%s.S" % (alt_opcode_locations[op], op)
+ else:
+ source = default_alt_stub
+ loadAndEmitAltStub(source, i)
+
+ emitAlign()
+ asm_fp.write(" .size %s, .-%s\n" % (start_label, start_label))
+ asm_fp.write(" .global %s\n" % end_label)
+ asm_fp.write("%s:\n" % end_label)
+
+#
+# Load an assembly fragment and emit it.
+#
+def loadAndEmitAsm(location, opindex, sister_list):
+ op = opcodes[opindex]
+ source = "%s/%s.S" % (location, op)
+ dict = getGlobalSubDict()
+ dict.update({ "opcode":op, "opnum":opindex })
+ if verbose:
+ print " emit %s --> asm" % source
+
+ emitAsmHeader(asm_fp, dict, label_prefix)
+ appendSourceFile(source, dict, asm_fp, sister_list)
+
+#
+# Emit fallback fragment
+#
+def emitFallback(opindex):
+ op = opcodes[opindex]
+ dict = getGlobalSubDict()
+ dict.update({ "opcode":op, "opnum":opindex })
+ emitAsmHeader(asm_fp, dict, label_prefix)
+ for line in fallback_stub_text:
+ asm_fp.write(line)
+ asm_fp.write("\n")
+
+#
+# Output the alignment directive and label for an assembly piece.
+#
+def emitAsmHeader(outfp, dict, prefix):
+ outfp.write("/* ------------------------------ */\n")
+ # The alignment directive ensures that the handler occupies
+ # at least the correct amount of space. We don't try to deal
+ # with overflow here.
+ emitAlign()
+ # Emit a label so that gdb will say the right thing. We prepend an
+ # underscore so the symbol name doesn't clash with the Opcode enum.
+ outfp.write(prefix + "_%(opcode)s: /* 0x%(opnum)02x */\n" % dict)
+
+#
+# Output a generic instruction stub that updates the "glue" struct and
+# calls the C implementation.
+#
+def emitAsmStub(outfp, dict):
+ emitAsmHeader(outfp, dict, label_prefix)
+ for line in asm_stub_text:
+ templ = Template(line)
+ outfp.write(templ.substitute(dict))
+
+#
+# Append the file specified by "source" to the open "outfp". Each line will
+# be template-replaced using the substitution dictionary "dict".
+#
+# If the first line of the file starts with "%" it is taken as a directive.
+# A "%include" line contains a filename and, optionally, a Python-style
+# dictionary declaration with substitution strings. (This is implemented
+# with recursion.)
+#
+# If "sister_list" is provided, and we find a line that contains only "&",
+# all subsequent lines from the file will be appended to sister_list instead
+# of copied to the output.
+#
+# This may modify "dict".
+#
+def appendSourceFile(source, dict, outfp, sister_list):
+ outfp.write("/* File: %s */\n" % source)
+ infp = open(source, "r")
+ in_sister = False
+ for line in infp:
+ if line.startswith("%include"):
+ # Parse the "include" line
+ tokens = line.strip().split(' ', 2)
+ if len(tokens) < 2:
+ raise DataParseError("malformed %%include in %s" % source)
+
+ alt_source = tokens[1].strip("\"")
+ if alt_source == source:
+ raise DataParseError("self-referential %%include in %s"
+ % source)
+
+ new_dict = dict.copy()
+ if len(tokens) == 3:
+ new_dict.update(eval(tokens[2]))
+ #print " including src=%s dict=%s" % (alt_source, new_dict)
+ appendSourceFile(alt_source, new_dict, outfp, sister_list)
+ continue
+
+ elif line.startswith("%default"):
+ # copy keywords into dictionary
+ tokens = line.strip().split(' ', 1)
+ if len(tokens) < 2:
+ raise DataParseError("malformed %%default in %s" % source)
+ defaultValues = eval(tokens[1])
+ for entry in defaultValues:
+ dict.setdefault(entry, defaultValues[entry])
+ continue
+
+ elif line.startswith("%break") and sister_list != None:
+ # allow more than one %break, ignoring all following the first
+ if style == "computed-goto" and not in_sister:
+ in_sister = True
+ sister_list.append("\n/* continuation for %(opcode)s */\n"%dict)
+ continue
+
+ # perform keyword substitution if a dictionary was provided
+ if dict != None:
+ templ = Template(line)
+ try:
+ subline = templ.substitute(dict)
+ except KeyError, err:
+ raise DataParseError("keyword substitution failed in %s: %s"
+ % (source, str(err)))
+ except:
+ print "ERROR: substitution failed: " + line
+ raise
+ else:
+ subline = line
+
+ # write output to appropriate file
+ if in_sister:
+ sister_list.append(subline)
+ else:
+ outfp.write(subline)
+ outfp.write("\n")
+ infp.close()
+
+#
+# Emit a C-style section header comment.
+#
+def emitSectionComment(str, fp):
+ equals = "========================================" \
+ "==================================="
+
+ fp.write("\n/*\n * %s\n * %s\n * %s\n */\n" %
+ (equals, str, equals))
+
+
+#
+# ===========================================================================
+# "main" code
+#
+
+#
+# Check args.
+#
+if len(sys.argv) != 3:
+ print "Usage: %s target-arch output-dir" % sys.argv[0]
+ sys.exit(2)
+
+target_arch = sys.argv[1]
+output_dir = sys.argv[2]
+
+#
+# Extract opcode list.
+#
+opcodes = getOpcodeList()
+#for op in opcodes:
+# print " %s" % op
+
+#
+# Open config file.
+#
+try:
+ config_fp = open("config_%s" % target_arch)
+except:
+ print "Unable to open config file 'config_%s'" % target_arch
+ sys.exit(1)
+
+#
+# Open and prepare output files.
+#
+try:
+ asm_fp = open("%s/mterp_%s.S" % (output_dir, target_arch), "w")
+except:
+ print "Unable to open output files"
+ print "Make sure directory '%s' exists and existing files are writable" \
+ % output_dir
+ # Ideally we'd remove the files to avoid confusing "make", but if they
+ # failed to open we probably won't be able to remove them either.
+ sys.exit(1)
+
+print "Generating %s" % (asm_fp.name)
+
+file_header = """/*
+ * This file was generated automatically by gen_mterp.py for '%s'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+""" % (target_arch)
+
+asm_fp.write(file_header)
+
+#
+# Process the config file.
+#
+failed = False
+try:
+ for line in config_fp:
+ line = line.strip() # remove CRLF, leading spaces
+ tokens = line.split(' ') # tokenize
+ #print "%d: %s" % (len(tokens), tokens)
+ if len(tokens[0]) == 0:
+ #print " blank"
+ pass
+ elif tokens[0][0] == '#':
+ #print " comment"
+ pass
+ else:
+ if tokens[0] == "handler-size":
+ setHandlerSize(tokens)
+ elif tokens[0] == "import":
+ importFile(tokens)
+ elif tokens[0] == "asm-stub":
+ setAsmStub(tokens)
+ elif tokens[0] == "asm-alt-stub":
+ setAsmAltStub(tokens)
+ elif tokens[0] == "op-start":
+ opStart(tokens)
+ elif tokens[0] == "op-end":
+ opEnd(tokens)
+ elif tokens[0] == "alt":
+ altEntry(tokens)
+ elif tokens[0] == "op":
+ opEntry(tokens)
+ elif tokens[0] == "handler-style":
+ setHandlerStyle(tokens)
+ elif tokens[0] == "alt-ops":
+ genaltop(tokens)
+ elif tokens[0] == "split-ops":
+ splitops = True
+ elif tokens[0] == "fallback-stub":
+ setFallbackStub(tokens)
+ else:
+ raise DataParseError, "unrecognized command '%s'" % tokens[0]
+ if style == None:
+ print "tokens[0] = %s" % tokens[0]
+ raise DataParseError, "handler-style must be first command"
+except DataParseError, err:
+ print "Failed: " + str(err)
+ # TODO: remove output files so "make" doesn't get confused
+ failed = True
+ asm_fp.close()
+ asm_fp = None
+
+config_fp.close()
+
+#
+# Done!
+#
+if asm_fp:
+ asm_fp.close()
+
+sys.exit(failed)
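
The generator is invoked as "gen_mterp.py <target-arch> <output-dir>" (per its usage message), reads config_<target-arch> from the current directory, and writes <output-dir>/mterp_<target-arch>.S. Its handler-size validation computes floor(log2(n)) by repeated halving and requires the size to reconstruct as a power of two; with the 128-byte handler-size that every config above sets, the computed-goto handler table must come out to exactly 256 * 128 = 32768 bytes, which is what CheckMterpAsmConstants() in mterp.cc below verifies at runtime. A condensed sketch of that check:

    # Sketch of setHandlerSize()'s power-of-two validation.
    def handler_size_bits(n):
        bits, b = -1, n
        while b > 0:
            b //= 2      # floor(log2(n)) by repeated halving
            bits += 1
        if n == 0 or n != (1 << bits):
            raise ValueError("handler-size (%d) must be power of 2" % n)
        return bits

    assert handler_size_bits(128) == 7
    assert 256 * 128 == 32768  # expected computed-goto handler table size
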
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
new file mode 100644
index 0000000..9975458
--- /dev/null
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -0,0 +1,620 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Mterp entry point and support functions.
+ */
+#include "interpreter/interpreter_common.h"
+#include "entrypoints/entrypoint_utils-inl.h"
+#include "mterp.h"
+
+namespace art {
+namespace interpreter {
+/*
+ * Verify some constants used by the mterp interpreter.
+ */
+void CheckMterpAsmConstants() {
+ /*
+ * If we're using computed goto instruction transitions, make sure
+ * none of the handlers overflows the 128-byte limit. This won't tell
+ * which one did, but if any one is too big the total size will
+ * overflow.
+ */
+ const int width = 128;
+ int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
+ (uintptr_t) artMterpAsmInstructionStart;
+ if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
+    LOG(art::FATAL) << "ERROR: unexpected asm interp size " << interp_size
+                    << " (did an instruction handler exceed " << width << " bytes?)";
+ }
+}
+
+void InitMterpTls(Thread* self) {
+ self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
+ self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
+ self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
+}
+
+/*
+ * Find the matching case. Returns the offset to the handler instructions.
+ *
+ * Returns 3 if we don't find a match (it's the size of the sparse-switch
+ * instruction).
+ */
+extern "C" int32_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
+ const int kInstrLen = 3;
+ uint16_t size;
+ const int32_t* keys;
+ const int32_t* entries;
+
+ /*
+ * Sparse switch data format:
+ * ushort ident = 0x0200 magic value
+ * ushort size number of entries in the table; > 0
+ * int keys[size] keys, sorted low-to-high; 32-bit aligned
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (2+size*4) 16-bit code units.
+ */
+
+ uint16_t signature = *switchData++;
+ DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));
+
+ size = *switchData++;
+
+ /* The keys are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ keys = reinterpret_cast<const int32_t*>(switchData);
+
+ /* The entries are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ entries = keys + size;
+
+ /*
+ * Binary-search through the array of keys, which are guaranteed to
+ * be sorted low-to-high.
+ */
+ int lo = 0;
+ int hi = size - 1;
+ while (lo <= hi) {
+ int mid = (lo + hi) >> 1;
+
+ int32_t foundVal = keys[mid];
+ if (testVal < foundVal) {
+ hi = mid - 1;
+ } else if (testVal > foundVal) {
+ lo = mid + 1;
+ } else {
+ return entries[mid];
+ }
+ }
+ return kInstrLen;
+}
+
+extern "C" int32_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
+ const int kInstrLen = 3;
+
+ /*
+ * Packed switch data format:
+ * ushort ident = 0x0100 magic value
+ * ushort size number of entries in the table
+ * int first_key first (and lowest) switch case value
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (4+size*2) 16-bit code units.
+ */
+ uint16_t signature = *switchData++;
+ DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));
+
+ uint16_t size = *switchData++;
+
+ int32_t firstKey = *switchData++;
+ firstKey |= (*switchData++) << 16;
+
+ int index = testVal - firstKey;
+ if (index < 0 || index >= size) {
+ return kInstrLen;
+ }
+
+ /*
+ * The entries are guaranteed to be aligned on a 32-bit boundary;
+ * we can treat them as a native int array.
+ */
+ const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
+ return entries[index];
+}
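+
+/*
+ * Worked example (illustrative values, not taken from any real method):
+ * a packed-switch payload {ident=0x0100, size=3, first_key=10,
+ * targets={5, 8, 11}} with testVal=11 yields index = 11 - 10 = 1 and
+ * returns targets[1] = 8; testVal=42 falls outside [0, 3) and returns
+ * kInstrLen (3), so execution falls through past the 3-code-unit
+ * switch instruction.
+ */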
+
+
+extern "C" bool MterpInvokeVirtual(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kVirtual, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeSuper(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kSuper, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeInterface(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kInterface, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeDirect(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kDirect, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeStatic(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kStatic, false, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeVirtualRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kVirtual, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeSuperRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kSuper, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeInterfaceRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kInterface, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeDirectRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kDirect, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeStaticRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvoke<kStatic, true, false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeVirtualQuick(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokeVirtualQuick<false>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" bool MterpInvokeVirtualQuickRange(Thread* self, ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr, uint16_t inst_data )
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue* result_register = shadow_frame->GetResultRegister();
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoInvokeVirtualQuick<true>(
+ self, *shadow_frame, inst, inst_data, result_register);
+}
+
+extern "C" void MterpThreadFenceForConstructor() {
+ QuasiAtomic::ThreadFenceForConstructor();
+}
+
+extern "C" bool MterpConstString(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* s = ResolveString(self, *shadow_frame, index);
+ if (UNLIKELY(s == nullptr)) {
+ return true;
+ }
+ shadow_frame->SetVRegReference(tgt_vreg, s);
+ return false;
+}
+
+extern "C" bool MterpConstClass(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
+ if (UNLIKELY(c == nullptr)) {
+ return true;
+ }
+ shadow_frame->SetVRegReference(tgt_vreg, c);
+ return false;
+}
+
+extern "C" bool MterpCheckCast(uint32_t index, Object* obj, art::ArtMethod* method,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+ if (UNLIKELY(c == nullptr)) {
+ return true;
+ }
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
+ ThrowClassCastException(c, obj->GetClass());
+ return true;
+ }
+ return false;
+}
+
+extern "C" bool MterpInstanceOf(uint32_t index, Object* obj, art::ArtMethod* method,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
+ if (UNLIKELY(c == nullptr)) {
+ return false; // Caller will check for pending exception. Return value unimportant.
+ }
+ return (obj != nullptr) && obj->InstanceOf(c);
+}
+
+extern "C" bool MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ return FillArrayData(obj, payload);
+}
+
+extern "C" bool MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ Object* obj = nullptr;
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
+ self, false, false);
+ if (LIKELY(c != nullptr)) {
+ if (UNLIKELY(c->IsStringClass())) {
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::SetStringCountVisitor visitor(0);
+ obj = String::Alloc<true>(self, 0, allocator_type, visitor);
+ } else {
+ obj = AllocObjectFromCode<false, true>(
+ inst->VRegB_21c(), shadow_frame->GetMethod(), self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ }
+ }
+ if (UNLIKELY(obj == nullptr)) {
+ return false;
+ }
+ obj->GetClass()->AssertInitializedOrInitializingInThread(self);
+ shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
+ return true;
+}
+
+extern "C" bool MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
+ (self, *shadow_frame, inst, inst_data);
+}
+
+extern "C" bool MterpIputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
+ (self, *shadow_frame, inst, inst_data);
+}
+
+extern "C" bool MterpIputObjectQuick(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
+}
+
+extern "C" bool MterpAputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
+ if (UNLIKELY(a == nullptr)) {
+ return false;
+ }
+ int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
+ Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
+ ObjectArray<Object>* array = a->AsObjectArray<Object>();
+ if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
+ array->SetWithoutChecks<false>(index, val);
+ return true;
+ }
+ return false;
+}
+
+extern "C" bool MterpFilledNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
+ shadow_frame->GetResultRegister());
+}
+
+extern "C" bool MterpFilledNewArrayRange(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
+ shadow_frame->GetResultRegister());
+}
+
+extern "C" bool MterpNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+ uint32_t inst_data, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(dex_pc_ptr);
+ int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
+ Object* obj = AllocArrayFromCode<false, true>(
+ inst->VRegC_22c(), length, shadow_frame->GetMethod(), self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ if (UNLIKELY(obj == nullptr)) {
+ return false;
+ }
+ shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
+ return true;
+}
+
+extern "C" bool MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self->IsExceptionPending());
+ const instrumentation::Instrumentation* const instrumentation =
+ Runtime::Current()->GetInstrumentation();
+ uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame,
+ shadow_frame->GetDexPC(),
+ instrumentation);
+ if (found_dex_pc == DexFile::kDexNoIndex) {
+ return false;
+ }
+ // OK - we can deal with it. Update and continue.
+ shadow_frame->SetDexPC(found_dex_pc);
+ return true;
+}
+
+extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
+ self->AssertPendingException();
+ } else {
+ self->AssertNoPendingException();
+ }
+}
+
+extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
+}
+
+extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Suspend Pending?: "
+ << self->IsExceptionPending();
+}
+
+extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self);
+ const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
+ uint16_t inst_data = inst->Fetch16(0);
+ if (flags & kCheckpointRequest) {
+ LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
+ } else if (flags & kSuspendRequest) {
+ LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
+ }
+}
+
+extern "C" void MterpSuspendCheck(Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ self->AllowThreadSuspension();
+}
+
+extern "C" int artSet64IndirectStaticFromMterp(uint32_t field_idx, ArtMethod* referrer,
+ uint64_t* new_value, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
+ sizeof(int64_t));
+ if (LIKELY(field != nullptr)) {
+ // Compiled code can't use transactional mode.
+ field->Set64<false>(field->GetDeclaringClass(), *new_value);
+ return 0; // success
+ }
+ field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
+ if (LIKELY(field != nullptr)) {
+ // Compiled code can't use transactional mode.
+ field->Set64<false>(field->GetDeclaringClass(), *new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet8InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
+ ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int8_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ Primitive::Type type = field->GetTypeAsPrimitiveType();
+ if (type == Primitive::kPrimBoolean) {
+ field->SetBoolean<false>(obj, new_value);
+ } else {
+ DCHECK_EQ(Primitive::kPrimByte, type);
+ field->SetByte<false>(obj, new_value);
+ }
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet16InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
+ ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int16_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ Primitive::Type type = field->GetTypeAsPrimitiveType();
+ if (type == Primitive::kPrimChar) {
+ field->SetChar<false>(obj, new_value);
+ } else {
+ DCHECK_EQ(Primitive::kPrimShort, type);
+ field->SetShort<false>(obj, new_value);
+ }
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet32InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
+ uint32_t new_value, ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int32_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->Set32<false>(obj, new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet64InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
+ uint64_t* new_value, ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int64_t));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->Set64<false>(obj, *new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSetObjInstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
+ mirror::Object* new_value, ArtMethod* referrer)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
+ sizeof(mirror::HeapReference<mirror::Object>));
+ if (LIKELY(field != nullptr && obj != nullptr)) {
+ field->SetObj<false>(obj, new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (UNLIKELY(arr == nullptr)) {
+ ThrowNullPointerExceptionFromInterpreter();
+ return nullptr;
+ }
+ ObjectArray<Object>* array = arr->AsObjectArray<Object>();
+ if (LIKELY(array->CheckIsValidIndex(index))) {
+ return array->GetWithoutChecks(index);
+ } else {
+ return nullptr;
+ }
+}
+
+extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (UNLIKELY(obj == nullptr)) {
+ ThrowNullPointerExceptionFromInterpreter();
+ return nullptr;
+ }
+ return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
new file mode 100644
index 0000000..90d21e9
--- /dev/null
+++ b/runtime/interpreter/mterp/mterp.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
+#define ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
+
+/*
+ * Mterp assembly handler bases
+ */
+extern "C" void* artMterpAsmInstructionStart[];
+extern "C" void* artMterpAsmInstructionEnd[];
+extern "C" void* artMterpAsmAltInstructionStart[];
+extern "C" void* artMterpAsmAltInstructionEnd[];
+
+namespace art {
+namespace interpreter {
+
+void InitMterpTls(Thread* self);
+void CheckMterpAsmConstants();
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_MTERP_MTERP_H_
diff --git a/runtime/interpreter/mterp/mterp_stub.cc b/runtime/interpreter/mterp/mterp_stub.cc
new file mode 100644
index 0000000..7e7337e
--- /dev/null
+++ b/runtime/interpreter/mterp/mterp_stub.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "../interpreter_common.h"
+
+/*
+ * Stub definitions for targets without mterp implementations.
+ */
+
+namespace art {
+namespace interpreter {
+/*
+ * Call this during initialization to verify that the values in asm-constants.h
+ * are still correct.
+ */
+void CheckMterpAsmConstants() {
+ // Dummy version when mterp not implemented.
+}
+
+void InitMterpTls(Thread* self) {
+ self->SetMterpDefaultIBase(nullptr);
+ self->SetMterpCurrentIBase(nullptr);
+ self->SetMterpAltIBase(nullptr);
+}
+
+/*
+ * The platform-specific implementation must provide this.
+ */
+extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result_register)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ UNUSED(self); UNUSED(shadow_frame); UNUSED(code_item); UNUSED(result_register);
+ UNIMPLEMENTED(art::FATAL);
+ return false;
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
new file mode 100644
index 0000000..2d6f057
--- /dev/null
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -0,0 +1,12202 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'arm'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: arm/header.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+  First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
+  handle invoke; allows higher-level code to create the frame & shadow frame).
+
+  Once that's working, support direct entry code & eliminate the shadow frame (and
+  excess locals allocation).
+
+  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
+  base of the vreg array within the shadow frame.  Access the other fields
+  (dex_pc_, method_ and number_of_vregs_) via negative offsets.  For now, we'll
+  continue the shadow frame mechanism of double-storing object references, via
+  rFP & number_of_vregs_.
+
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them. If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
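+
+/*
+ * Illustration (not part of the interpreter itself): under these rules a
+ * function returning a 64-bit long leaves the result split across r0/r1,
+ * low word in r0, e.g.
+ *
+ *     mov r0, #1     @ low word of the 64-bit result
+ *     mov r1, #0     @ high word
+ *     bx lr          @ caller picks up the pair from r0/r1
+ */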
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF self (Thread) pointer
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+ r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+#define rREFS r11
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
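+
+/*
+ * Example (illustrative; the same pattern appears in the handlers below):
+ *
+ *     ldr r2, [rFP, #OFF_FP_METHOD]   @ r2<- ArtMethod* of the current frame
+ */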
+
+/*
+ * The reference interpreter performs explicit suspend checks, which is somewhat wasteful.
+ * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually
+ * mterp should do so as well.
+ */
+#define MTERP_SUSPEND 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+.endm
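+
+/*
+ * Typical use (see e.g. the const/string handler below): EXPORT_PC is issued
+ * before calling out to a helper that can throw, so any resulting exception
+ * is attributed to the current instruction.
+ */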
+
+.macro EXPORT_DEX_PC tmp
+    ldr  \tmp, [rFP, #OFF_FP_CODE_ITEM]     @ \tmp<- code_item
+    str  rPC, [rFP, #OFF_FP_DEX_PC_PTR]     @ export raw dex pc pointer
+    add  \tmp, #CODEITEM_INSNS_OFFSET       @ \tmp<- &code_item->insns[0]
+    sub  \tmp, rPC, \tmp                    @ \tmp<- byte offset into insns[]
+    asr  \tmp, #1                           @ bytes -> 16-bit code units
+    str  \tmp, [rFP, #OFF_FP_DEX_PC]        @ store instruction-offset dex pc
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ ldrh rINST, [rPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh rINST, [rPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh rINST, [rPC, #((\count)*2)]
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ add rPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ ldrh rINST, [rPC, \reg]!
+.endm
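+
+/*
+ * Typical use (see the branch handlers below): double the signed code-unit
+ * offset into a byte offset first, e.g.
+ *
+ *     adds r2, r1, r1             @ r2<- byte offset, set flags
+ *     FETCH_ADVANCE_INST_RB r2    @ update rPC, load rINST
+ */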
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [rPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [rPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [rPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+.macro GOTO_OPCODE reg
+ add pc, rIBASE, \reg, lsl #7
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add pc, \base, \reg, lsl #7
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [rFP, \vreg, lsl #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [rFP, \vreg, lsl #2]
+ mov \reg, #0
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [rFP, \vreg, lsl #2]
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
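+
+/*
+ * Note: SET_VREG also zeroes the matching slot in the reference array, so a
+ * stale object reference is never left double-stored once the vreg holds a
+ * non-reference value; SET_VREG_OBJECT stores to both arrays instead.
+ */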
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
+
+/* File: arm/entry.S */
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+
+/*
+ * On entry:
+ *  r0  Thread* self
+ * r1 code_item
+ * r2 ShadowFrame
+ * r3 JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+ .fnstart
+ .save {r4-r10,fp,lr}
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+ .pad #4
+    sub     sp, sp, #4                          @ align stack to 64 bits
+
+ /* Remember the return register */
+ str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the code_item */
+ str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
+
+ /* set up "named" registers */
+ mov rSELF, r0
+ ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+    add     rFP, r2, #SHADOWFRAME_VREGS_OFFSET  @ point to vregs[] within the shadow frame
+ add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame
+ ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
+ add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
+ add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* start executing the instruction at rPC */
+ FETCH_INST @ load rINST from rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+ /* NOTE: no fallthrough */
+
+
+ .global artMterpAsmInstructionStart
+ .type artMterpAsmInstructionStart, %function
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+/* ------------------------------ */
+ .balign 128
+.L_op_nop: /* 0x00 */
+/* File: arm/op_nop.S */
+ FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ GOTO_OPCODE ip @ execute it
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move: /* 0x01 */
+/* File: arm/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_from16: /* 0x02 */
+/* File: arm/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH r1, 1 @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_16: /* 0x03 */
+/* File: arm/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH r1, 2 @ r1<- BBBB
+ FETCH r0, 1 @ r0<- AAAA
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AAAA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide: /* 0x04 */
+/* File: arm/op_move_wide.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_from16: /* 0x05 */
+/* File: arm/op_move_wide_from16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 1 @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_wide_16: /* 0x06 */
+/* File: arm/op_move_wide_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 2 @ r3<- BBBB
+ FETCH r2, 1 @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object: /* 0x07 */
+/* File: arm/op_move_object.S */
+/* File: arm/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_from16: /* 0x08 */
+/* File: arm/op_move_object_from16.S */
+/* File: arm/op_move_from16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH r1, 1 @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_object_16: /* 0x09 */
+/* File: arm/op_move_object_16.S */
+/* File: arm/op_move_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH r1, 2 @ r1<- BBBB
+ FETCH r0, 1 @ r0<- AAAA
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AAAA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result: /* 0x0a */
+/* File: arm/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JValue.
+ ldr r0, [r0] @ r0 <- result.i.
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_wide: /* 0x0b */
+/* File: arm/op_move_result_wide.S */
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_result_object: /* 0x0c */
+/* File: arm/op_move_result_object.S */
+/* File: arm/op_move_result.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+    ldr     r0, [rFP, #OFF_FP_RESULT_REGISTER]  @ get pointer to result JValue.
+ ldr r0, [r0] @ r0 <- result.i.
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if 1
+ SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_move_exception: /* 0x0d */
+/* File: arm/op_move_exception.S */
+ /* move-exception vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void: /* 0x0e */
+/* File: arm/op_return_void.S */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return: /* 0x0f */
+/* File: arm/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA
+ mov r1, #0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_wide: /* 0x10 */
+/* File: arm/op_return_wide.S */
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_object: /* 0x11 */
+/* File: arm/op_return_object.S */
+/* File: arm/op_return.S */
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA
+ mov r1, #0
+ b MterpReturn
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_4: /* 0x12 */
+/* File: arm/op_const_4.S */
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ SET_VREG r1, r0 @ fp[A]<- r1
+ GOTO_OPCODE ip @ execute next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_16: /* 0x13 */
+/* File: arm/op_const_16.S */
+ /* const/16 vAA, #+BBBB */
+    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const: /* 0x14 */
+/* File: arm/op_const.S */
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+    FETCH r0, 1                         @ r0<- bbbb (low)
+    FETCH r1, 2                         @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_high16: /* 0x15 */
+/* File: arm/op_const_high16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+    FETCH r0, 1                         @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_16: /* 0x16 */
+/* File: arm/op_const_wide_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+    FETCH_S r0, 1                       @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_32: /* 0x17 */
+/* File: arm/op_const_wide_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH r0, 1 @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S r2, 2 @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide: /* 0x18 */
+/* File: arm/op_const_wide.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (low middle)
+ FETCH r2, 3 @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH r3, 4 @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_wide_high16: /* 0x19 */
+/* File: arm/op_const_wide_high16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string: /* 0x1a */
+/* File: arm/op_const_string.S */
+ /* const/string vAA, String@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_string_jumbo: /* 0x1b */
+/* File: arm/op_const_string_jumbo.S */
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+    FETCH r0, 1                         @ r0<- bbbb (low)
+    FETCH r2, 2                         @ r2<- BBBB (high)
+    mov     r1, rINST, lsr #8           @ r1<- AA
+    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+    PREFETCH_INST 3                     @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 3 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_const_class: /* 0x1c */
+/* File: arm/op_const_class.S */
+ /* const/class vAA, Class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_enter: /* 0x1d */
+/* File: arm/op_monitor_enter.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artLockObjectFromCode
+ cmp r0, #0
+ bne MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_monitor_exit: /* 0x1e */
+/* File: arm/op_monitor_exit.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+    mov      r1, rSELF                  @ r1<- self
+ bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ bne MterpException
+ FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_check_cast: /* 0x1f */
+/* File: arm/op_check_cast.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ GET_VREG r1, r1 @ r1<- object
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ bl MterpCheckCast @ (index, obj, method, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_instance_of: /* 0x20 */
+/* File: arm/op_instance_of.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- vB (object)
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ mov r9, rINST, lsr #8 @ r9<- A+
+ and r9, r9, #15 @ r9<- A
+ bl MterpInstanceOf @ (index, obj, method, self)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0 @ exception pending?
+ bne MterpException
+ ADVANCE 2 @ advance rPC
+ SET_VREG r0, r9 @ vA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_array_length: /* 0x21 */
+/* File: arm/op_array_length.S */
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r0, r1 @ r0<- vB (object ref)
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r3, r2 @ vB<- length
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_instance: /* 0x22 */
+/* File: arm/op_new_instance.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rSELF
+ mov r2, rINST
+ bl MterpNewInstance @ (shadow_frame, self, inst_data)
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_new_array: /* 0x23 */
+/* File: arm/op_new_array.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpNewArray
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array: /* 0x24 */
+/* File: arm/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern MterpFilledNewArray
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rSELF
+ bl MterpFilledNewArray
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_filled_new_array_range: /* 0x25 */
+/* File: arm/op_filled_new_array_range.S */
+/* File: arm/op_filled_new_array.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern MterpFilledNewArrayRange
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rSELF
+ bl MterpFilledNewArrayRange
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_fill_array_data: /* 0x26 */
+/* File: arm/op_fill_array_data.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG r0, r3 @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ bl MterpFillArrayData @ (obj, payload)
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq MterpPossibleException @ exception?
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_throw: /* 0x27 */
+/* File: arm/op_throw.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r1, r2 @ r1<- vAA (exception object)
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
+ b MterpException
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto: /* 0x28 */
+/* File: arm/op_goto.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ @ If backwards branch refresh rIBASE
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ @ If backwards branch refresh rIBASE
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_16: /* 0x29 */
+/* File: arm/op_goto_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+#if MTERP_SUSPEND
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH_S r0, 1 @ r0<- ssssAAAA (sign-extended)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_goto_32: /* 0x2a */
+/* File: arm/op_goto_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- aaaa (lo)
+ FETCH r1, 2 @ r1<- AAAA (hi)
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_packed_switch: /* 0x2b */
+/* File: arm/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoPackedSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoPackedSwitch @ r0<- code-unit branch offset
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sparse_switch: /* 0x2c */
+/* File: arm/op_sparse_switch.S */
+/* File: arm/op_packed_switch.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+#if MTERP_SUSPEND
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoSparseSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ ldrle rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh handler base
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl MterpDoSparseSwitch @ r0<- code-unit branch offset
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ble MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_float: /* 0x2d */
+/* File: arm/op_cmpl_float.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_float: /* 0x2e */
+/* File: arm/op_cmpg_float.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpl_double: /* 0x2f */
+/* File: arm/op_cmpl_double.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmpg_double: /* 0x30 */
+/* File: arm/op_cmpg_double.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_cmp_long: /* 0x31 */
+/* File: arm/op_cmp_long.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .Lop_cmp_long_less @ signed compare on high part
+ bgt .Lop_cmp_long_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .Lop_cmp_long_greater @ unsigned compare on low part
+ bne .Lop_cmp_long_less
+ b .Lop_cmp_long_finish @ equal; r1 already holds 0
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eq: /* 0x32 */
+/* File: arm/op_if_eq.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movne r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movne r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
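+
+/*
+ * Illustrative only: what the bincmp template above computes, in rough C.
+ * fetch_s() stands in for the FETCH_S macro and OP for the (non-reversed)
+ * comparison; offsets are signed counts of 16-bit code units.
+ *
+ * int16_t offset = fetch_s(1); // branch distance if taken
+ * if (!(vA OP vB)) // "revcmp" tests the reverse
+ * offset = 2; // not taken: step over this inst
+ * rPC += (int32_t)offset * 2; // code units to bytes; a negative
+ * // (backward) branch also runs the
+ * // suspend check
+ */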
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ne: /* 0x33 */
+/* File: arm/op_if_ne.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lt: /* 0x34 */
+/* File: arm/op_if_lt.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ge: /* 0x35 */
+/* File: arm/op_if_ge.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gt: /* 0x36 */
+/* File: arm/op_if_gt.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_le: /* 0x37 */
+/* File: arm/op_if_le.S */
+/* File: arm/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+#if MTERP_SUSPEND
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r2, r0 @ r2<- vA
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_eqz: /* 0x38 */
+/* File: arm/op_if_eqz.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ movne r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ movne r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
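+
+/*
+ * Illustrative only: zcmp is the one-operand form of the bincmp sketch
+ * after op_if_eq, with an implicit zero right-hand side.
+ *
+ * int16_t offset = fetch_s(1);
+ * if (!(vAA OP 0)) // reverse comparison again
+ * offset = 2;
+ * rPC += (int32_t)offset * 2; // backward branches re-check the
+ * // suspend flags, as in bincmp
+ */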
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_nez: /* 0x39 */
+/* File: arm/op_if_nez.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_ltz: /* 0x3a */
+/* File: arm/op_if_ltz.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gez: /* 0x3b */
+/* File: arm/op_if_gez.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_gtz: /* 0x3c */
+/* File: arm/op_if_gtz.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_if_lez: /* 0x3d */
+/* File: arm/op_if_lez.S */
+/* File: arm/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+#if MTERP_SUSPEND
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vAA, 0)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh table base
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#else
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r2, r0 @ r2<- vAA
+ FETCH_S r1, 1 @ r1<- branch offset, in code units
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ cmp r2, #0 @ compare (vAA, 0)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB r1 @ update rPC, load rINST
+ bmi MterpCheckSuspendAndContinue
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3e: /* 0x3e */
+/* File: arm/op_unused_3e.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_3f: /* 0x3f */
+/* File: arm/op_unused_3f.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_40: /* 0x40 */
+/* File: arm/op_unused_40.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_41: /* 0x41 */
+/* File: arm/op_unused_41.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_42: /* 0x42 */
+/* File: arm/op_unused_42.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_43: /* 0x43 */
+/* File: arm/op_unused_43.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget: /* 0x44 */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldr r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
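+
+/*
+ * Illustrative only: the op_aget fast path in rough C. Int32Array and the
+ * throw_* helpers are hypothetical stand-ins for the mirror layout and the
+ * common_err* exits. One unsigned compare rejects both negative and
+ * out-of-range indices.
+ *
+ * typedef struct { int32_t length; int32_t data[]; } Int32Array;
+ *
+ * int32_t aget(const Int32Array* arr, uint32_t index) {
+ * if (arr == NULL)
+ * throw_null_pointer(); // common_errNullObject
+ * if (index >= (uint32_t)arr->length)
+ * throw_array_index(); // common_errArrayIndex
+ * return arr->data[index];
+ * }
+ */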
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_wide: /* 0x45 */
+/* File: arm/op_aget_wide.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
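+
+/*
+ * Illustrative only: aget-wide follows the same shape with an 8-byte
+ * element stride (index << 3) and a paired store into fp[AA]/fp[AA+1].
+ * Int64Array is a hypothetical stand-in; the single LDRD is legal because
+ * wide array elements are 8-byte aligned.
+ *
+ * typedef struct { int32_t length; int64_t data[]; } Int64Array;
+ *
+ * int64_t aget_wide(const Int64Array* arr, uint32_t index) {
+ * if (arr == NULL) throw_null_pointer();
+ * if (index >= (uint32_t)arr->length) throw_array_index();
+ * return arr->data[index]; // one aligned 8-byte load
+ * }
+ */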
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_object: /* 0x46 */
+/* File: arm/op_aget_object.S */
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ EXPORT_PC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ bl artAGetObjectFromMterp @ (array, index)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0
+ bne MterpException
+ SET_VREG_OBJECT r0, r9
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_boolean: /* 0x47 */
+/* File: arm/op_aget_boolean.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_byte: /* 0x48 */
+/* File: arm/op_aget_byte.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrsb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_char: /* 0x49 */
+/* File: arm/op_aget_char.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aget_short: /* 0x4a */
+/* File: arm/op_aget_short.S */
+/* File: arm/op_aget.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldrsh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput: /* 0x4b */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ str r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
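+
+/*
+ * Illustrative only: op_aput is the store mirror of op_aget; the null and
+ * bounds checks are identical and only the direction of the final access
+ * changes (Int32Array and throw_* as in the op_aget sketch).
+ *
+ * void aput(Int32Array* arr, uint32_t index, int32_t value) {
+ * if (arr == NULL) throw_null_pointer();
+ * if (index >= (uint32_t)arr->length) throw_array_index();
+ * arr->data[index] = value;
+ * }
+ */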
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_wide: /* 0x4c */
+/* File: arm/op_aput_wide.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_object: /* 0x4d */
+/* File: arm/op_aput_object.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpAputObject
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_boolean: /* 0x4e */
+/* File: arm/op_aput_boolean.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_byte: /* 0x4f */
+/* File: arm/op_aput_byte.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_char: /* 0x50 */
+/* File: arm/op_aput_char.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_aput_short: /* 0x51 */
+/* File: arm/op_aput_short.S */
+/* File: arm/op_aput.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ strh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget: /* 0x52 */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGet32InstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
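+
+/*
+ * Illustrative only: the shape shared by all iget variants, in rough C.
+ * artGet32InstanceFromCode and the thread exception slot come from the
+ * code above; fetch() and the vreg names are stand-ins for the macros.
+ *
+ * uint16_t field_ref = fetch(1); // CCCC
+ * uint32_t result = artGet32InstanceFromCode(field_ref, vB_object,
+ * referrer, self);
+ * if (self->exception != NULL)
+ * goto MterpPossibleException; // resolution or NPE failure
+ * vA = result; // SET_VREG / SET_VREG_OBJECT
+ */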
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide: /* 0x53 */
+/* File: arm/op_iget_wide.S */
+ /*
+ * 64-bit instance field get.
+ *
+ * for: iget-wide
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGet64InstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpException @ bail out
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object: /* 0x54 */
+/* File: arm/op_iget_object.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetObjInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 1
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean: /* 0x55 */
+/* File: arm/op_iget_boolean.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetBooleanInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte: /* 0x56 */
+/* File: arm/op_iget_byte.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetByteInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char: /* 0x57 */
+/* File: arm/op_iget_char.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetCharInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short: /* 0x58 */
+/* File: arm/op_iget_short.S */
+/* File: arm/op_iget.S */
+ /*
+ * General instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
+ mov r3, rSELF @ r3<- self
+ bl artGetShortInstanceFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ .if 0
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[A]<- r0
+ .endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput: /* 0x59 */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet32InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet32InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
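+
+/*
+ * Illustrative only: iput reports failure differently from iget; the
+ * setter returns a status word (0 on success), so the exception slot is
+ * not read here. Names other than artSet32InstanceFromMterp are stand-ins.
+ *
+ * int status = artSet32InstanceFromMterp(field_ref, vB_object, vA_value,
+ * referrer);
+ * if (status != 0)
+ * goto MterpPossibleException;
+ */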
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide: /* 0x5a */
+/* File: arm/op_iput_wide.S */
+ /* iput-wide vA, vB, field@CCCC */
+ .extern artSet64InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet64InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object: /* 0x5b */
+/* File: arm/op_iput_object.S */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpIputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean: /* 0x5c */
+/* File: arm/op_iput_boolean.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet8InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte: /* 0x5d */
+/* File: arm/op_iput_byte.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet8InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet8InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char: /* 0x5e */
+/* File: arm/op_iput_char.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet16InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short: /* 0x5f */
+/* File: arm/op_iput_short.S */
+/* File: arm/op_iput.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ .extern artSet16InstanceFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ GET_VREG r1, r1 @ r1<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r2, r2 @ r2<- fp[A]
+ ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
+ PREFETCH_INST 2
+ bl artSet16InstanceFromMterp
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget: /* 0x60 */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGet32StaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGet32StaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
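+
+/*
+ * Illustrative only: sget has no receiver, so the helper takes just the
+ * field ref, the referring method, and the thread; success is detected
+ * through the exception slot, as in iget. Non-art* names are stand-ins.
+ *
+ * uint32_t result = artGet32StaticFromCode(field_ref, referrer, self);
+ * if (self->exception != NULL)
+ * goto MterpException;
+ * vAA = result;
+ */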
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_wide: /* 0x61 */
+/* File: arm/op_sget_wide.S */
+ /*
+ * SGET_WIDE handler wrapper.
+ */
+ /* sget-wide vAA, field@BBBB */
+
+ .extern artGet64StaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGet64StaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r9, rINST, lsr #8 @ r9<- AA
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_object: /* 0x62 */
+/* File: arm/op_sget_object.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetObjStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetObjStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 1
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_boolean: /* 0x63 */
+/* File: arm/op_sget_boolean.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetBooleanStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetBooleanStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_byte: /* 0x64 */
+/* File: arm/op_sget_byte.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetByteStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetByteStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_char: /* 0x65 */
+/* File: arm/op_sget_char.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetCharStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetCharStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sget_short: /* 0x66 */
+/* File: arm/op_sget_short.S */
+/* File: arm/op_sget.S */
+ /*
+ * General SGET handler wrapper.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+
+ .extern artGetShortStaticFromCode
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl artGetShortStaticFromCode
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ PREFETCH_INST 2
+ cmp r3, #0 @ Failed to resolve?
+ bne MterpException @ bail out
+.if 0
+ SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
+.else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+.endif
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput: /* 0x67 */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet32StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
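+
+/*
+ * Illustrative only: sput mirrors sget but, like iput, reports failure
+ * through the helper's return value (0 on success).
+ *
+ * int status = artSet32StaticFromCode(field_ref, vAA_value, referrer, self);
+ * if (status != 0)
+ * goto MterpException;
+ */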
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_wide: /* 0x68 */
+/* File: arm/op_sput_wide.S */
+ /*
+ * SPUT_WIDE handler wrapper.
+ */
+ /* sput-wide vAA, field@BBBB */
+ .extern artSet64IndirectStaticFromMterp
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ ldr r1, [rFP, #OFF_FP_METHOD]
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet64IndirectStaticFromMterp
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_object: /* 0x69 */
+/* File: arm/op_sput_object.S */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpSputObject
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_boolean: /* 0x6a */
+/* File: arm/op_sput_boolean.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet8StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_byte: /* 0x6b */
+/* File: arm/op_sput_byte.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet8StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_char: /* 0x6c */
+/* File: arm/op_sput_char.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet16StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sput_short: /* 0x6d */
+/* File: arm/op_sput_short.S */
+/* File: arm/op_sput.S */
+ /*
+ * General SPUT handler wrapper.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- field ref BBBB
+ mov r3, rINST, lsr #8 @ r3<- AA
+ GET_VREG r1, r3 @ r1<- fp[AA]
+ ldr r2, [rFP, #OFF_FP_METHOD]
+ mov r3, rSELF
+ PREFETCH_INST 2 @ Get next inst, but don't advance rPC
+ bl artSet16StaticFromCode
+ cmp r0, #0 @ 0 on success, -1 on failure
+ bne MterpException
+ ADVANCE 2 @ Past exception point - now advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual: /* 0x6e */
+/* File: arm/op_invoke_virtual.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtual
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtual
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
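+
+/*
+ * Illustrative only: every invoke opcode in this block shares the wrapper
+ * shape above; only the MterpInvoke* helper changes, and all argument
+ * decoding happens on the C++ side.
+ *
+ * bool ok = MterpInvokeVirtual(self, shadow_frame, dex_pc_ptr, inst_data);
+ * if (!ok)
+ * goto MterpException;
+ * rPC += 3 * 2; // invokes are 3 code units wide
+ */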
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super: /* 0x6f */
+/* File: arm/op_invoke_super.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuper
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeSuper
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct: /* 0x70 */
+/* File: arm/op_invoke_direct.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirect
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeDirect
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static: /* 0x71 */
+/* File: arm/op_invoke_static.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStatic
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeStatic
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface: /* 0x72 */
+/* File: arm/op_invoke_interface.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterface
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeInterface
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_return_void_no_barrier: /* 0x73 */
+/* File: arm/op_return_void_no_barrier.S */
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range: /* 0x74 */
+/* File: arm/op_invoke_virtual_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtualRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_super_range: /* 0x75 */
+/* File: arm/op_invoke_super_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeSuperRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeSuperRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_direct_range: /* 0x76 */
+/* File: arm/op_invoke_direct_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeDirectRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeDirectRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_static_range: /* 0x77 */
+/* File: arm/op_invoke_static_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeStaticRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeStaticRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_interface_range: /* 0x78 */
+/* File: arm/op_invoke_interface_range.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeInterfaceRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeInterfaceRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_79: /* 0x79 */
+/* File: arm/op_unused_79.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_7a: /* 0x7a */
+/* File: arm/op_unused_7a.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_int: /* 0x7b */
+/* File: arm/op_neg_int.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ rsb r0, r0, #0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
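+    /*
+     * The unop template above expands to, roughly (illustrative C):
+     *
+     *   vA = -(int32_t)vB;   // neg-int; other unops substitute their "instr"
+     */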
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_int: /* 0x7c */
+/* File: arm/op_not_int.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mvn r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_long: /* 0x7d */
+/* File: arm/op_neg_long.S */
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ rsbs r0, r0, #0 @ optional op; may set condition codes
+ rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
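+    /*
+     * Two-word negation, roughly (illustrative C):
+     *
+     *   lo = 0 - lo;              // rsbs sets the borrow
+     *   hi = 0 - hi - borrow;     // rsc consumes it
+     *
+     * which together compute result = -(int64_t)value.
+     */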
+
+/* ------------------------------ */
+ .balign 128
+.L_op_not_long: /* 0x7e */
+/* File: arm/op_not_long.S */
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mvn r0, r0 @ optional op; may set condition codes
+ mvn r1, r1 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_float: /* 0x7f */
+/* File: arm/op_neg_float.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
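+    /*
+     * Float negation is done in the integer pipeline: adding 0x80000000
+     * flips only bit 31, so it is equivalent to (illustrative C):
+     *
+     *   vA = vB ^ 0x80000000;     // toggle the IEEE sign bit
+     */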
+
+/* ------------------------------ */
+ .balign 128
+.L_op_neg_double: /* 0x80 */
+/* File: arm/op_neg_double.S */
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_long: /* 0x81 */
+/* File: arm/op_int_to_long.S */
+/* File: arm/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_float: /* 0x82 */
+/* File: arm/op_int_to_float.S */
+/* File: arm/funop.S */
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fsitos s1, s0 @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_double: /* 0x83 */
+/* File: arm/op_int_to_double.S */
+/* File: arm/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fsitod d0, s0 @ d0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_int: /* 0x84 */
+/* File: arm/op_long_to_int.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: arm/op_move.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if 0
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_float: /* 0x85 */
+/* File: arm/op_long_to_float.S */
+/* File: arm/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for op_move.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_long_to_double: /* 0x86 */
+/* File: arm/op_long_to_double.S */
+ /*
+ * Specialised 64-bit floating point operation.
+ *
+ * Note: The result will be returned in d2.
+ *
+ * For: long-to-double
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ vldr d0, [r3] @ d0<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ vcvt.f64.s32 d1, s1 @ d1<- (double)(vAAh)
+ vcvt.f64.u32 d2, s0 @ d2<- (double)(vAAl)
+ vldr d3, constvalop_long_to_double
+ vmla.f64 d2, d1, d3 @ d2<- vAAh*2^32 + vAAl
+
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ vstr.64 d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+ /* literal pool helper */
+constvalop_long_to_double:
+ .8byte 0x41f0000000000000
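+    /*
+     * The constant is 2^32 as an IEEE double, so the vcvt/vmla sequence
+     * above computes, roughly (illustrative C):
+     *
+     *   result = (double)(int32_t)hi * 4294967296.0 + (double)(uint32_t)lo;
+     */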
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_int: /* 0x87 */
+/* File: arm/op_float_to_int.S */
+/* File: arm/funop.S */
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ ftosizs s1, s0 @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_long: /* 0x88 */
+/* File: arm/op_float_to_long.S */
+@include "arm/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+/* File: arm/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ bl f2l_doconv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
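+    /*
+     * The commented-out @include would call the EABI helper __aeabi_f2lz,
+     * but Java requires NaN -> 0 and saturation on overflow, which EABI
+     * does not guarantee; f2l_doconv is assumed to implement, roughly:
+     *
+     *   if (f != f) return 0;                         // NaN
+     *   if (f >= (float)INT64_MAX) return INT64_MAX;  // clamp high
+     *   if (f <= (float)INT64_MIN) return INT64_MIN;  // clamp low
+     *   return (int64_t)f;
+     */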
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_float_to_double: /* 0x89 */
+/* File: arm/op_float_to_double.S */
+/* File: arm/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fcvtds d0, s0 @ d0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_int: /* 0x8a */
+/* File: arm/op_double_to_int.S */
+/* File: arm/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ ftosizd s0, d0 @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_long: /* 0x8b */
+/* File: arm/op_double_to_long.S */
+@include "arm/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+/* File: arm/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl d2l_doconv @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
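+    /*
+     * As with float-to-long above, d2l_doconv (rather than the EABI helper
+     * __aeabi_d2lz from the commented-out @include) is assumed to apply the
+     * Java rules: NaN converts to 0 and out-of-range values saturate to
+     * Long.MIN_VALUE / Long.MAX_VALUE.
+     */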
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_double_to_float: /* 0x8c */
+/* File: arm/op_double_to_float.S */
+/* File: arm/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fcvtsd s0, d0 @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_byte: /* 0x8d */
+/* File: arm/op_int_to_byte.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ sxtb r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_char: /* 0x8e */
+/* File: arm/op_int_to_char.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ uxth r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_int_to_short: /* 0x8f */
+/* File: arm/op_int_to_short.S */
+/* File: arm/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ sxth r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int: /* 0x90 */
+/* File: arm/op_add_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
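+    /*
+     * The binop template above expands to, roughly (illustrative C):
+     *
+     *   vAA = (int32_t)vBB + (int32_t)vCC;   // other binops swap in "instr"
+     */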
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int: /* 0x91 */
+/* File: arm/op_sub_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int: /* 0x92 */
+/* File: arm/op_mul_int.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int: /* 0x93 */
+/* File: arm/op_div_int.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+     * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
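+    /*
+     * Roughly (illustrative C):
+     *
+     *   if (vCC == 0) goto common_errDivideByZero;
+     *   vAA = (int32_t)vBB / (int32_t)vCC;   // sdiv when the core has one,
+     *                                        // else the __aeabi_idiv libcall
+     */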
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int: /* 0x94 */
+/* File: arm/op_rem_int.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+     * Performs "r1 = r0 rem r1". The selection between the sdiv block and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
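+    /*
+     * The sdiv/mls pair computes the remainder from the identity
+     * rem = dividend - (dividend / divisor) * divisor, i.e. (illustrative C):
+     *
+     *   r1 = r0 - (r0 / r1) * r1;
+     */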
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int: /* 0x95 */
+/* File: arm/op_and_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int: /* 0x96 */
+/* File: arm/op_or_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int: /* 0x97 */
+/* File: arm/op_xor_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int: /* 0x98 */
+/* File: arm/op_shl_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int: /* 0x99 */
+/* File: arm/op_shr_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int: /* 0x9a */
+/* File: arm/op_ushr_int.S */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long: /* 0x9b */
+/* File: arm/op_add_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
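+    /*
+     * 64-bit add as two 32-bit adds chained through the carry flag,
+     * roughly (illustrative C):
+     *
+     *   lo = lo1 + lo2;             // adds: sets carry on overflow
+     *   hi = hi1 + hi2 + carry;     // adc: consumes the carry
+     */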
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long: /* 0x9c */
+/* File: arm/op_sub_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long: /* 0x9d */
+/* File: arm/op_mul_long.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
+
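+    /*
+     * Roughly (illustrative C), with x = vBB (W:X) and y = vCC (Y:Z):
+     *
+     *   uint64_t zx = (uint64_t)(uint32_t)x * (uint32_t)y;        // ZxX
+     *   uint32_t hi = (uint32_t)(zx >> 32)
+     *               + (uint32_t)x * (uint32_t)(y >> 32)           // XxY
+     *               + (uint32_t)(x >> 32) * (uint32_t)y;          // WxZ
+     *   vAA = ((uint64_t)hi << 32) | (uint32_t)zx;
+     */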
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long: /* 0x9e */
+/* File: arm/op_div_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long: /* 0x9f */
+/* File: arm/op_rem_long.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long: /* 0xa0 */
+/* File: arm/op_and_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long: /* 0xa1 */
+/* File: arm/op_or_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long: /* 0xa2 */
+/* File: arm/op_xor_long.S */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long: /* 0xa3 */
+/* File: arm/op_shl_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+     * distance to its low 6 bits.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @  r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
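+    /*
+     * Shift composition, roughly (illustrative C) with n = vCC & 63; the
+     * n >= 32 and n == 0 edge cases fall out of ARM's register-shift
+     * semantics and the movpl fixup above:
+     *
+     *   hi = (hi << n) | (lo >> (32 - n));   // low bits spill into high word
+     *   if (n >= 32) hi = lo << (n - 32);    // movpl path
+     *   lo = lo << n;
+     */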
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long: /* 0xa4 */
+/* File: arm/op_shr_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+     * distance to its low 6 bits.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long: /* 0xa5 */
+/* File: arm/op_ushr_long.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit. Also, Dalvik requires us to mask the shift
+     * distance to its low 6 bits.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
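+    /*
+     * shr-long and ushr-long differ only in how the high word is shifted:
+     * asr (sign-filling) for shr-long, lsr (zero-filling) for ushr-long,
+     * matching Java's >> and >>> on longs.
+     */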
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float: /* 0xa6 */
+/* File: arm/op_add_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fadds s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float: /* 0xa7 */
+/* File: arm/op_sub_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fsubs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float: /* 0xa8 */
+/* File: arm/op_mul_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fmuls s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float: /* 0xa9 */
+/* File: arm/op_div_float.S */
+/* File: arm/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fdivs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float: /* 0xaa */
+/* File: arm/op_rem_float.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: arm/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
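+ /*
+ * Sketch of the rem-float semantics, assuming libm's fmodf (whose result
+ * takes the sign of the dividend, as Dalvik requires):
+ *
+ * vreg_f[AA] = fmodf(vreg_f[BB], vreg_f[CC]);
+ *
+ * Since this is a real call, the operands travel in r0/r1 per the softfp
+ * calling convention rather than staying in VFP registers.
+ */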
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double: /* 0xab */
+/* File: arm/op_add_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ faddd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double: /* 0xac */
+/* File: arm/op_sub_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fsubd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double: /* 0xad */
+/* File: arm/op_mul_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fmuld d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double: /* 0xae */
+/* File: arm/op_div_double.S */
+/* File: arm/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ fdivd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double: /* 0xaf */
+/* File: arm/op_rem_double.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: arm/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r2-r3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_2addr: /* 0xb0 */
+/* File: arm/op_add_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_int_2addr: /* 0xb1 */
+/* File: arm/op_sub_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_2addr: /* 0xb2 */
+/* File: arm/op_mul_int_2addr.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_2addr: /* 0xb3 */
+/* File: arm/op_div_int_2addr.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
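+ /*
+ * Why there is no INT_MIN/-1 test: with __ARM_ARCH_EXT_IDIV__, sdiv of
+ * 0x80000000 by -1 yields 0x80000000, which is the result Java requires,
+ * and the __aeabi_idiv helper behaves the same way on the fallback path.
+ * Only divide-by-zero needs the explicit check above.
+ */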
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_2addr: /* 0xb4 */
+/* File: arm/op_rem_int_2addr.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
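+ /*
+ * The sdiv/mls pair above is the usual remainder identity, roughly:
+ *
+ * r2 = r0 / r1; // sdiv: truncated quotient
+ * r1 = r0 - r2 * r1; // mls: remainder, sign follows dividend
+ *
+ * matching what __aeabi_idivmod returns in r1 on the non-IDIV path.
+ */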
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_2addr: /* 0xb5 */
+/* File: arm/op_and_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_2addr: /* 0xb6 */
+/* File: arm/op_or_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_2addr: /* 0xb7 */
+/* File: arm/op_xor_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_2addr: /* 0xb8 */
+/* File: arm/op_shl_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_2addr: /* 0xb9 */
+/* File: arm/op_shr_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_2addr: /* 0xba */
+/* File: arm/op_ushr_int_2addr.S */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_long_2addr: /* 0xbb */
+/* File: arm/op_add_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
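+ /*
+ * 64-bit add from two 32-bit halves, roughly:
+ *
+ * lo = a_lo + b_lo; // adds: sets the carry flag
+ * hi = a_hi + b_hi + carry; // adc: folds the carry in
+ *
+ * (sub-long/2addr below has the same shape with subs/sbc and borrow.)
+ */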
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_long_2addr: /* 0xbc */
+/* File: arm/op_sub_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_long_2addr: /* 0xbd */
+/* File: arm/op_mul_long_2addr.S */
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See op_mul_long for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
+ /* mul-long/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE ip @ jump to next instruction
+
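+ /*
+ * Rough C sketch of the decomposition above, with vA = W:X and vB = Y:Z
+ * as hi:lo 32-bit halves (only the low 64 bits of the product survive):
+ *
+ * uint64_t p = (uint64_t)X * Z; // umull -> r9/r10
+ * uint32_t hi = (uint32_t)(p >> 32) + X*Y + W*Z; // mul, mla, add
+ * vA = ((uint64_t)hi << 32) | (uint32_t)p;
+ */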
+/* ------------------------------ */
+ .balign 128
+.L_op_div_long_2addr: /* 0xbe */
+/* File: arm/op_div_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
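+ /*
+ * The 64-bit zero test is a single instruction: "orrs ip, r2, r3" ORs the
+ * two halves of the divisor and sets Z only when both words are zero,
+ * i.e. roughly: if ((lo | hi) == 0) goto common_errDivideByZero; the EABI
+ * helper __aeabi_ldivmod then leaves the quotient in r0/r1.
+ */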
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_long_2addr: /* 0xbf */
+/* File: arm/op_rem_long_2addr.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_long_2addr: /* 0xc0 */
+/* File: arm/op_and_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_long_2addr: /* 0xc1 */
+/* File: arm/op_or_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_long_2addr: /* 0xc2 */
+/* File: arm/op_xor_long_2addr.S */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_long_2addr: /* 0xc3 */
+/* File: arm/op_shl_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
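+ /*
+ * Branch-free two-word left shift, roughly (ARM register-specified shifts
+ * of 32 or more yield 0, so the n == 0 and n >= 32 corners fall out):
+ *
+ * n &= 63;
+ * hi = (hi << n) | (lo >> (32 - n)); // orr with lsr r3
+ * if (n >= 32) hi = lo << (n - 32); // movpl on flags from subs
+ * lo = lo << n; // becomes 0 when n >= 32
+ */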
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_long_2addr: /* 0xc4 */
+/* File: arm/op_shr_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_long_2addr: /* 0xc5 */
+/* File: arm/op_ushr_long_2addr.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_float_2addr: /* 0xc6 */
+/* File: arm/op_add_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fadds s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
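+ /*
+ * "/2addr" decode note: rINST holds B|A|opcode, so "r9<- A+" grabs the
+ * byte that still carries B in its high nibble and the "and #15" strips
+ * it, leaving A. Net effect, roughly: vreg_f[A] += vreg_f[B];
+ */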
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_float_2addr: /* 0xc7 */
+/* File: arm/op_sub_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fsubs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_float_2addr: /* 0xc8 */
+/* File: arm/op_mul_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fmuls s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_float_2addr: /* 0xc9 */
+/* File: arm/op_div_float_2addr.S */
+/* File: arm/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fdivs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_float_2addr: /* 0xca */
+/* File: arm/op_rem_float_2addr.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: arm/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_double_2addr: /* 0xcb */
+/* File: arm/op_add_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ faddd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_sub_double_2addr: /* 0xcc */
+/* File: arm/op_sub_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fsubd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_double_2addr: /* 0xcd */
+/* File: arm/op_mul_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fmuld d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_double_2addr: /* 0xce */
+/* File: arm/op_div_double_2addr.S */
+/* File: arm/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fdivd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_double_2addr: /* 0xcf */
+/* File: arm/op_rem_double_2addr.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: arm/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
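+ /*
+ * Sketch, assuming libm's fmod: both doubles travel as core-register
+ * pairs (r0/r1 and r2/r3) per the softfp AAPCS, so the generic 64-bit
+ * binop template works unchanged:
+ *
+ * vreg_d[A] = fmod(vreg_d[A], vreg_d[B]);
+ */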
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit16: /* 0xd0 */
+/* File: arm/op_add_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
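+ /*
+ * lit16 sketch: the second code unit is the literal, fetched
+ * sign-extended by FETCH_S, so roughly:
+ *
+ * vreg[A] = vreg[B] + (int16_t)CCCC;
+ */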
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int: /* 0xd1 */
+/* File: arm/op_rsub_int.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
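+ /*
+ * Note the operand order: "rsb r0, r0, r1" computes r1 - r0, so this is
+ * roughly vreg[A] = (int16_t)CCCC - vreg[B] (literal minus register).
+ */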
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit16: /* 0xd2 */
+/* File: arm/op_mul_int_lit16.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit16: /* 0xd3 */
+/* File: arm/op_div_int_lit16.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit16: /* 0xd4 */
+/* File: arm/op_rem_int_lit16.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit16: /* 0xd5 */
+/* File: arm/op_and_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit16: /* 0xd6 */
+/* File: arm/op_or_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit16: /* 0xd7 */
+/* File: arm/op_xor_int_lit16.S */
+/* File: arm/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CCCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_add_int_lit8: /* 0xd8 */
+/* File: arm/op_add_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
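+ /*
+ * lit8 decode sketch: the second code unit is CC|BB, fetched
+ * sign-extended, so roughly:
+ *
+ * BB = unit & 0xff; CC = (int32_t)unit >> 8; // asr keeps CC's sign
+ *
+ * Because "movs" sets the flags from CC, the div/rem variants below keep
+ * their cmp commented out and branch on those flags directly.
+ */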
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rsub_int_lit8: /* 0xd9 */
+/* File: arm/op_rsub_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_mul_int_lit8: /* 0xda */
+/* File: arm/op_mul_int_lit8.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * the literal CC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_div_int_lit8: /* 0xdb */
+/* File: arm/op_div_int_lit8.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+     * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit8
+ *
+ */
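+    /*
+     * Either path rounds toward zero, as the Dex spec requires: e.g.
+     * -7 div 2 yields -3 (not -4) from both sdiv and __aeabi_idiv.
+     */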
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+    @cmp r1, #0                         @ is second operand zero? (flags set by movs above)
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_rem_int_lit8: /* 0xdc */
+/* File: arm/op_rem_int_lit8.S */
+ /*
+ * Specialized 32-bit binary operation
+ *
+     * Performs "r1 = r0 rem r1". The selection between the sdiv block and the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit8
+ *
+ */
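+    /*
+     * With round-toward-zero division the remainder keeps the sign of the
+     * dividend: e.g. -7 rem 2 yields -1. On the sdiv path this is computed
+     * as r0 - (r0 / r1) * r1 by the mls below.
+     */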
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+    @cmp r1, #0                         @ is second operand zero? (flags set by movs above)
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+/* ------------------------------ */
+ .balign 128
+.L_op_and_int_lit8: /* 0xdd */
+/* File: arm/op_and_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_or_int_lit8: /* 0xde */
+/* File: arm/op_or_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_xor_int_lit8: /* 0xdf */
+/* File: arm/op_xor_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shl_int_lit8: /* 0xe0 */
+/* File: arm/op_shl_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
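+    /*
+     * The "and r1, r1, #31" implements the Dex rule that only the low five
+     * bits of the shift distance are used: shifting by 33 shifts by 1.
+     */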
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_shr_int_lit8: /* 0xe1 */
+/* File: arm/op_shr_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_ushr_int_lit8: /* 0xe2 */
+/* File: arm/op_ushr_int_lit8.S */
+/* File: arm/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+    FETCH_S r3, 1                       @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_quick: /* 0xe3 */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
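+    /*
+     * "Quick" means the field offset in CCCC was resolved at quickening
+     * time, so the access is a single ldr at obj + offset guarded only by
+     * the null check above.
+     */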
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_wide_quick: /* 0xe4 */
+/* File: arm/op_iget_wide_quick.S */
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH ip, 1 @ ip<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_object_quick: /* 0xe5 */
+/* File: arm/op_iget_object_quick.S */
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r0, r2 @ r0<- object we're operating on
+ cmp r0, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ bl artIGetObjectFromMterp @ (obj, offset)
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_quick: /* 0xe6 */
+/* File: arm/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_wide_quick: /* 0xe7 */
+/* File: arm/op_iput_wide_quick.S */
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r3, 1 @ r3<- field byte offset
+ GET_VREG r2, r2 @ r2<- fp[B], the object pointer
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ cmp r2, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[A]
+ ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_object_quick: /* 0xe8 */
+/* File: arm/op_iput_object_quick.S */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpIputObjectQuick
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
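+    /*
+     * Object stores go out of line through MterpIputObjectQuick
+     * (presumably so reference-store bookkeeping such as the GC write
+     * barrier is applied); a plain str as in op_iput_quick would skip it.
+     */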
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_quick: /* 0xe9 */
+/* File: arm/op_invoke_virtual_quick.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuick
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtualQuick
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_virtual_range_quick: /* 0xea */
+/* File: arm/op_invoke_virtual_range_quick.S */
+/* File: arm/invoke.S */
+ /*
+ * Generic invoke handler wrapper.
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ .extern MterpInvokeVirtualQuickRange
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl MterpInvokeVirtualQuickRange
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 3
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_boolean_quick: /* 0xeb */
+/* File: arm/op_iput_boolean_quick.S */
+/* File: arm/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strb r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_byte_quick: /* 0xec */
+/* File: arm/op_iput_byte_quick.S */
+/* File: arm/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strb r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_char_quick: /* 0xed */
+/* File: arm/op_iput_char_quick.S */
+/* File: arm/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strh r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iput_short_quick: /* 0xee */
+/* File: arm/op_iput_short_quick.S */
+/* File: arm/op_iput_quick.S */
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strh r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_boolean_quick: /* 0xef */
+/* File: arm/op_iget_boolean_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrb r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_byte_quick: /* 0xf0 */
+/* File: arm/op_iget_byte_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrsb r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_char_quick: /* 0xf1 */
+/* File: arm/op_iget_char_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrh r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_iget_short_quick: /* 0xf2 */
+/* File: arm/op_iget_short_quick.S */
+/* File: arm/op_iget_quick.S */
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrsh r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_invoke_lambda: /* 0xf3 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_f4: /* 0xf4 */
+/* File: arm/op_unused_f4.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_capture_variable: /* 0xf5 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_create_lambda: /* 0xf6 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_liberate_variable: /* 0xf7 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_box_lambda: /* 0xf8 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unbox_lambda: /* 0xf9 */
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fa: /* 0xfa */
+/* File: arm/op_unused_fa.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fb: /* 0xfb */
+/* File: arm/op_unused_fb.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fc: /* 0xfc */
+/* File: arm/op_unused_fc.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fd: /* 0xfd */
+/* File: arm/op_unused_fd.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_fe: /* 0xfe */
+/* File: arm/op_unused_fe.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+/* ------------------------------ */
+ .balign 128
+.L_op_unused_ff: /* 0xff */
+/* File: arm/op_unused_ff.S */
+/* File: arm/unused.S */
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+
+ .balign 128
+ .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global artMterpAsmSisterStart
+ .type artMterpAsmSisterStart, %function
+ .text
+ .balign 4
+artMterpAsmSisterStart:
+
+/* continuation for op_cmp_long */
+
+.Lop_cmp_long_less:
+ mvn r1, #0 @ r1<- -1
+    @ We would like to conditionalize the next mov to avoid the branch, but
+    @ there is no clean way to do it here; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+.Lop_cmp_long_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.Lop_cmp_long_finish:
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
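+/*
+ * These continuations materialize the -1 ("less") and +1 ("greater")
+ * results of cmp-long; r9 still holds AA from the main handler.
+ */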
+
+/* continuation for op_float_to_long */
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
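+/*
+ * Note: 0x5f000000 is the single-precision bit pattern of +2^63 (what
+ * (float)maxlong rounds up to) and 0xdf000000 is -2^63 (exact), so the
+ * two compares below clip exactly at the long range boundaries.
+ */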
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+    mvnne   r0, #0                      @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+    movne   r0, #0                      @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
+
+/* continuation for op_double_to_long */
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
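+/*
+ * Note: 0x43e00000:00000000 and 0xc3e00000:00000000 are the double bit
+ * patterns of +2^63 and -2^63, so the compares below clip exactly at the
+ * long range boundaries.
+ */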
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ mov r3, #0x43000000 @ maxlong, as a double (high word)
+ add r3, #0x00e00000 @ 0x43e00000
+ mov r2, #0 @ maxlong, as a double (low word)
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save a copy of r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r3, #0xc3000000 @ minlong, as a double (high word)
+ add r3, #0x00e00000 @ 0xc3e00000
+ mov r2, #0 @ minlong, as a double (low word)
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+ .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
+ .global artMterpAsmSisterEnd
+artMterpAsmSisterEnd:
+
+
+ .global artMterpAsmAltInstructionStart
+ .type artMterpAsmAltInstructionStart, %function
+ .text
+
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_nop: /* 0x00 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
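+/*
+ * Because lr is preloaded with the primary handler's address, the C
+ * function's return acts as the jump into the real handler, so each alt
+ * stub costs only the MterpCheckBefore round trip.
+ */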
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (0 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move: /* 0x01 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (1 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_from16: /* 0x02 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (2 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_16: /* 0x03 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (3 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide: /* 0x04 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (4 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_from16: /* 0x05 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (5 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_wide_16: /* 0x06 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (6 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object: /* 0x07 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (7 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_from16: /* 0x08 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (8 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_object_16: /* 0x09 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (9 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result: /* 0x0a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (10 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_wide: /* 0x0b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (11 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_result_object: /* 0x0c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (12 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_move_exception: /* 0x0d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (13 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void: /* 0x0e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (14 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return: /* 0x0f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (15 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_wide: /* 0x10 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (16 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_object: /* 0x11 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (17 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_4: /* 0x12 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (18 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_16: /* 0x13 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (19 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const: /* 0x14 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (20 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_high16: /* 0x15 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (21 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_16: /* 0x16 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (22 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_32: /* 0x17 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (23 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide: /* 0x18 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (24 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_wide_high16: /* 0x19 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (25 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string: /* 0x1a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (26 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_string_jumbo: /* 0x1b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (27 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_const_class: /* 0x1c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (28 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_enter: /* 0x1d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (29 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_monitor_exit: /* 0x1e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (30 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_check_cast: /* 0x1f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (31 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_instance_of: /* 0x20 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (32 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_array_length: /* 0x21 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (33 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_instance: /* 0x22 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (34 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_new_array: /* 0x23 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (35 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array: /* 0x24 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (36 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_filled_new_array_range: /* 0x25 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (37 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_fill_array_data: /* 0x26 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (38 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_throw: /* 0x27 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (39 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto: /* 0x28 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (40 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_16: /* 0x29 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (41 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_goto_32: /* 0x2a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (42 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_packed_switch: /* 0x2b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (43 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sparse_switch: /* 0x2c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (44 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_float: /* 0x2d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (45 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_float: /* 0x2e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (46 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpl_double: /* 0x2f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (47 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmpg_double: /* 0x30 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (48 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_cmp_long: /* 0x31 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (49 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eq: /* 0x32 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (50 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ne: /* 0x33 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (51 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lt: /* 0x34 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (52 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ge: /* 0x35 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (53 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gt: /* 0x36 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (54 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_le: /* 0x37 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (55 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_eqz: /* 0x38 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (56 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_nez: /* 0x39 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (57 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_ltz: /* 0x3a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (58 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gez: /* 0x3b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (59 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_gtz: /* 0x3c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (60 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_if_lez: /* 0x3d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (61 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3e: /* 0x3e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (62 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_3f: /* 0x3f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (63 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_40: /* 0x40 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (64 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_41: /* 0x41 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (65 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_42: /* 0x42 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (66 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_43: /* 0x43 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (67 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget: /* 0x44 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (68 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_wide: /* 0x45 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (69 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_object: /* 0x46 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (70 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_boolean: /* 0x47 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (71 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_byte: /* 0x48 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (72 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_char: /* 0x49 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (73 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aget_short: /* 0x4a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (74 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput: /* 0x4b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (75 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_wide: /* 0x4c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (76 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_object: /* 0x4d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (77 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_boolean: /* 0x4e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (78 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_byte: /* 0x4f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (79 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_char: /* 0x50 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (80 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_aput_short: /* 0x51 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (81 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget: /* 0x52 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (82 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide: /* 0x53 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (83 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object: /* 0x54 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (84 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean: /* 0x55 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (85 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte: /* 0x56 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (86 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char: /* 0x57 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (87 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short: /* 0x58 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (88 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput: /* 0x59 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (89 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide: /* 0x5a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (90 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object: /* 0x5b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (91 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean: /* 0x5c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (92 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte: /* 0x5d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (93 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char: /* 0x5e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (94 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short: /* 0x5f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (95 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget: /* 0x60 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (96 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_wide: /* 0x61 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (97 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_object: /* 0x62 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (98 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_boolean: /* 0x63 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (99 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_byte: /* 0x64 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (100 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_char: /* 0x65 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (101 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sget_short: /* 0x66 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (102 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput: /* 0x67 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (103 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_wide: /* 0x68 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (104 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_object: /* 0x69 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (105 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_boolean: /* 0x6a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (106 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_byte: /* 0x6b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (107 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_char: /* 0x6c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (108 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sput_short: /* 0x6d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (109 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual: /* 0x6e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (110 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super: /* 0x6f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (111 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct: /* 0x70 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (112 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static: /* 0x71 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (113 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface: /* 0x72 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (114 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_return_void_no_barrier: /* 0x73 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (115 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range: /* 0x74 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (116 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_super_range: /* 0x75 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (117 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_direct_range: /* 0x76 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (118 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_static_range: /* 0x77 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (119 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_interface_range: /* 0x78 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (120 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_79: /* 0x79 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (121 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_7a: /* 0x7a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (122 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_int: /* 0x7b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (123 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_int: /* 0x7c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (124 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_long: /* 0x7d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (125 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_not_long: /* 0x7e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (126 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_float: /* 0x7f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (127 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_neg_double: /* 0x80 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (128 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_long: /* 0x81 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (129 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_float: /* 0x82 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (130 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_double: /* 0x83 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (131 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_int: /* 0x84 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (132 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_float: /* 0x85 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (133 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_long_to_double: /* 0x86 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (134 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_int: /* 0x87 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (135 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_long: /* 0x88 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (136 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_float_to_double: /* 0x89 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (137 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_int: /* 0x8a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (138 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_long: /* 0x8b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (139 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_double_to_float: /* 0x8c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (140 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_byte: /* 0x8d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (141 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_char: /* 0x8e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (142 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_int_to_short: /* 0x8f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (143 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int: /* 0x90 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (144 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int: /* 0x91 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (145 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int: /* 0x92 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (146 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int: /* 0x93 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (147 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int: /* 0x94 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (148 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int: /* 0x95 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (149 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int: /* 0x96 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (150 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int: /* 0x97 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (151 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int: /* 0x98 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (152 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int: /* 0x99 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (153 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int: /* 0x9a */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (154 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long: /* 0x9b */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (155 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long: /* 0x9c */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (156 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long: /* 0x9d */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (157 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long: /* 0x9e */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (158 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long: /* 0x9f */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (159 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long: /* 0xa0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (160 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long: /* 0xa1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (161 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long: /* 0xa2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (162 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long: /* 0xa3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (163 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long: /* 0xa4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (164 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long: /* 0xa5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (165 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float: /* 0xa6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (166 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float: /* 0xa7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (167 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float: /* 0xa8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (168 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float: /* 0xa9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (169 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float: /* 0xaa */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (170 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double: /* 0xab */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (171 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double: /* 0xac */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (172 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double: /* 0xad */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (173 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double: /* 0xae */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (174 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double: /* 0xaf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (175 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_2addr: /* 0xb0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (176 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_int_2addr: /* 0xb1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (177 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_2addr: /* 0xb2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (178 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_2addr: /* 0xb3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (179 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_2addr: /* 0xb4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (180 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_2addr: /* 0xb5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (181 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_2addr: /* 0xb6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (182 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_2addr: /* 0xb7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (183 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_2addr: /* 0xb8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (184 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_2addr: /* 0xb9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (185 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_2addr: /* 0xba */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (186 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_long_2addr: /* 0xbb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (187 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_long_2addr: /* 0xbc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (188 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_long_2addr: /* 0xbd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (189 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_long_2addr: /* 0xbe */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (190 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_long_2addr: /* 0xbf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (191 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_long_2addr: /* 0xc0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (192 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_long_2addr: /* 0xc1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (193 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_long_2addr: /* 0xc2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (194 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_long_2addr: /* 0xc3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (195 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_long_2addr: /* 0xc4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (196 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_long_2addr: /* 0xc5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (197 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_float_2addr: /* 0xc6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (198 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_float_2addr: /* 0xc7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (199 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_float_2addr: /* 0xc8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (200 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_float_2addr: /* 0xc9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (201 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_float_2addr: /* 0xca */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (202 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_double_2addr: /* 0xcb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (203 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_sub_double_2addr: /* 0xcc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (204 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_double_2addr: /* 0xcd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (205 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_double_2addr: /* 0xce */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (206 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_double_2addr: /* 0xcf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (207 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit16: /* 0xd0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (208 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int: /* 0xd1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (209 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit16: /* 0xd2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (210 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit16: /* 0xd3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (211 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit16: /* 0xd4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (212 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit16: /* 0xd5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (213 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit16: /* 0xd6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (214 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit16: /* 0xd7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (215 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_add_int_lit8: /* 0xd8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (216 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rsub_int_lit8: /* 0xd9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (217 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_mul_int_lit8: /* 0xda */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (218 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_div_int_lit8: /* 0xdb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (219 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_rem_int_lit8: /* 0xdc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (220 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_and_int_lit8: /* 0xdd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (221 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_or_int_lit8: /* 0xde */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (222 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_xor_int_lit8: /* 0xdf */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (223 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shl_int_lit8: /* 0xe0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (224 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_shr_int_lit8: /* 0xe1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (225 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_ushr_int_lit8: /* 0xe2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (226 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_quick: /* 0xe3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (227 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_wide_quick: /* 0xe4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (228 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_object_quick: /* 0xe5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (229 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_quick: /* 0xe6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (230 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_wide_quick: /* 0xe7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (231 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_object_quick: /* 0xe8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (232 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (233 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (234 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_boolean_quick: /* 0xeb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (235 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_byte_quick: /* 0xec */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (236 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_char_quick: /* 0xed */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (237 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iput_short_quick: /* 0xee */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (238 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_boolean_quick: /* 0xef */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (239 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_byte_quick: /* 0xf0 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (240 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_char_quick: /* 0xf1 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (241 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_iget_short_quick: /* 0xf2 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (242 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_invoke_lambda: /* 0xf3 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (243 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_f4: /* 0xf4 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (244 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_capture_variable: /* 0xf5 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (245 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_create_lambda: /* 0xf6 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (246 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_liberate_variable: /* 0xf7 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (247 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_box_lambda: /* 0xf8 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (248 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unbox_lambda: /* 0xf9 */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (249 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fa: /* 0xfa */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (250 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fb: /* 0xfb */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (251 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fc: /* 0xfc */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (252 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fd: /* 0xfd */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (253 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_fe: /* 0xfe */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (254 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_op_unused_ff: /* 0xff */
+/* File: arm/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ EXPORT_PC
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
+ adrl lr, artMterpAsmInstructionStart + (255 * 128) @ Addr of primary handler.
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ b MterpCheckBefore @ (self, shadow_frame) @ Tail call.
+
+ .balign 128
+ .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
+ .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
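+
Every stub in the alternative opcode table above occupies one 128-byte cell (the .balign 128 padding), so the address of the corresponding primary handler is pure arithmetic on the opcode; the 0xda stub, for example, targets artMterpAsmInstructionStart + (218 * 128). A minimal C++ sketch of that lookup (the function is illustrative only; the constant mirrors the cell size used above):

    #include <cstdint>

    // Each mterp handler lives in a fixed-size, 128-byte-aligned cell.
    constexpr std::uintptr_t kMterpHandlerSize = 128;

    // Illustrative: compute a handler entry point from a table base such as
    // artMterpAsmInstructionStart. Opcode 0xda (218) yields base + 218 * 128,
    // matching "adrl lr, artMterpAsmInstructionStart + (218 * 128)" above.
    std::uintptr_t HandlerForOpcode(std::uintptr_t table_base, std::uint8_t opcode) {
      return table_base + opcode * kMterpHandlerSize;
    }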
+/* File: arm/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+#define MTERP_LOGGING 0
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ cmp r0, #0 @ Exception pending?
+ beq MterpFallback @ If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ *
+ */
+MterpException:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException @ (self, shadow_frame)
+ cmp r0, #0
+ beq MterpExceptionReturn @ no local catch, back to caller.
+ ldr r0, [rFP, #OFF_FP_CODE_ITEM]
+ ldr r1, [rFP, #OFF_FP_DEX_PC]
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add rPC, r0, #CODEITEM_INSNS_OFFSET
+ add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+ /* resume execution at catch block */
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+
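In C++ terms, the MterpException sequence above does roughly the following (a sketch, not runtime code; MterpHandleException is the helper invoked above, and the ShadowFrame state is passed as plain locals here):

    #include <cstdint>

    class Thread;
    class ShadowFrame;
    extern "C" bool MterpHandleException(Thread* self, ShadowFrame* shadow_frame);

    bool HandlePendingException(Thread* self, ShadowFrame* frame,
                                const uint16_t* insns, uint32_t dex_pc) {
      if (!MterpHandleException(self, frame)) {
        return false;  // No local catch handler: unwind to the caller.
      }
      // The catch lookup updated the frame's dex pc; recompute the raw pointer.
      // insns is a uint16_t array, so this "+ dex_pc" is the "lsl #1" above.
      const uint16_t* dex_pc_ptr = insns + dex_pc;
      (void)dex_pc_ptr;  // Would be stored back into the frame before resuming dispatch.
      return true;
    }
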
+/*
+ * Check for a pending suspend request. Assumes rINST is already loaded and rPC has been
+ * advanced; we still need to extract the opcode and branch to it. Thread flags are in lr.
+ */
+MterpCheckSuspendAndContinue:
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh rIBASE
+ EXPORT_PC
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
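The ands/blne pair above is the usual flag-test-and-call idiom; rendered as a C++ sketch (the request-flag values are assumptions, the real constants live in the runtime's asm_support definitions):

    #include <cstdint>

    class Thread;
    extern "C" void MterpSuspendCheck(Thread* self);

    // Assumed values; stand-ins for THREAD_SUSPEND_REQUEST / THREAD_CHECKPOINT_REQUEST.
    constexpr uint32_t kSuspendRequest    = 1u << 0;
    constexpr uint32_t kCheckpointRequest = 1u << 1;

    void CheckSuspendAndContinue(Thread* self, uint32_t thread_flags) {
      if ((thread_flags & (kSuspendRequest | kCheckpointRequest)) != 0) {
        MterpSuspendCheck(self);  // May suspend this thread or run a checkpoint.
      }
      // ...then extract the next opcode and dispatch, as GOTO_OPCODE does above.
    }
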
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov r0, #0 @ signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov r0, #1 @ signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ str r0, [r2]
+ str r1, [r2, #4]
+ mov r0, rSELF
+ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST)
+ blne MterpSuspendCheck @ (self)
+ mov r0, #1 @ signal return to caller.
+MterpDone:
+    add     sp, sp, #4                              @ undo the 64-bit stack alignment padding
+ ldmfd sp!, {r4-r10,fp,pc} @ restore 9 regs and return
+
+
+ .fnend
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
+
diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh
new file mode 100755
index 0000000..a325fff
--- /dev/null
+++ b/runtime/interpreter/mterp/rebuild.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Rebuild for all known targets. Necessary until the generated sources in "out"
+# are produced as part of the build.
+#
+set -e
+
+# for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
+for arch in arm; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 3c2898b..f08a1a9 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -16,6 +16,13 @@
#include "debugger_interface.h"
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "thread-inl.h"
+#include "thread.h"
+
+#include <unordered_map>
+
namespace art {
// -------------------------------------------------------------------
@@ -57,13 +64,19 @@
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
}
-JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size) {
+static Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+
+static JITCodeEntry* CreateJITCodeEntryInternal(
+ std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size)
+ REQUIRES(g_jit_debug_mutex) {
+ DCHECK(symfile_addr.get() != nullptr);
+
JITCodeEntry* entry = new JITCodeEntry;
- entry->symfile_addr_ = symfile_addr;
+ entry->symfile_addr_ = symfile_addr.release();
entry->symfile_size_ = symfile_size;
entry->prev_ = nullptr;
- // TODO: Do we need a lock here?
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != nullptr) {
entry->next_->prev_ = entry;
@@ -76,8 +89,7 @@
return entry;
}
-void DeleteJITCodeEntry(JITCodeEntry* entry) {
- // TODO: Do we need a lock here?
+static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug_mutex) {
if (entry->prev_ != nullptr) {
entry->prev_->next_ = entry->next_;
} else {
@@ -91,7 +103,48 @@
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
__jit_debug_register_code();
+ delete[] entry->symfile_addr_;
delete entry;
}
+JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ return CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+}
+
+void DeleteJITCodeEntry(JITCodeEntry* entry) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ DeleteJITCodeEntryInternal(entry);
+}
+
+// Mapping from address to entry. It takes ownership of the entries
+// so that the user of the JIT interface does not have to store them.
+static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries;
+
+void CreateJITCodeEntryForAddress(uintptr_t address,
+ std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ DCHECK_NE(address, 0u);
+ DCHECK(g_jit_code_entries.find(address) == g_jit_code_entries.end());
+ JITCodeEntry* entry = CreateJITCodeEntryInternal(std::move(symfile_addr), symfile_size);
+ g_jit_code_entries.emplace(address, entry);
+}
+
+bool DeleteJITCodeEntryForAddress(uintptr_t address) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, g_jit_debug_mutex);
+ const auto& it = g_jit_code_entries.find(address);
+ if (it == g_jit_code_entries.end()) {
+ return false;
+ }
+ DeleteJITCodeEntryInternal(it->second);
+ g_jit_code_entries.erase(it);
+ return true;
+}
+
} // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index a784ef5..74469a9 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
#include <inttypes.h>
+#include <memory>
namespace art {
@@ -26,11 +27,25 @@
}
// Notify native debugger about new JITed code by passing in-memory ELF.
-JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size);
+// It takes ownership of the in-memory ELF file.
+JITCodeEntry* CreateJITCodeEntry(std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size);
// Notify native debugger that JITed code has been removed.
+// It also releases the associated in-memory ELF file.
void DeleteJITCodeEntry(JITCodeEntry* entry);
+// Notify native debugger about new JITed code by passing in-memory ELF.
+// The address is used only to uniquely identify the entry.
+// It takes ownership of the in-memory ELF file.
+void CreateJITCodeEntryForAddress(uintptr_t address,
+ std::unique_ptr<const uint8_t[]> symfile_addr,
+ uintptr_t symfile_size);
+
+// Notify native debugger that JITed code has been removed.
+// Returns false if the entry for the given address was not found.
+bool DeleteJITCodeEntryForAddress(uintptr_t address);
+
} // namespace art
#endif // ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
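
Together, the two *ForAddress entry points let the JIT use the debug interface as a keyed registry, so it never stores JITCodeEntry pointers itself. A hypothetical caller, using only the functions declared above (the wrapper names and the symfile producer are assumptions):

    #include <cstdint>
    #include <memory>
    #include <utility>
    #include "jit/debugger_interface.h"

    // Hypothetical wrapper: register symfile_size bytes of an in-memory ELF image
    // describing JITed code that starts at code_address. Ownership of the buffer
    // transfers to the debug interface.
    void RegisterJitSymbols(uintptr_t code_address,
                            std::unique_ptr<const uint8_t[]> symfile,
                            uintptr_t symfile_size) {
      art::CreateJITCodeEntryForAddress(code_address, std::move(symfile), symfile_size);
    }

    // On code removal: this also frees the ELF buffer; false means nothing
    // was registered for the address.
    bool UnregisterJitSymbols(uintptr_t code_address) {
      return art::DeleteJITCodeEntryForAddress(code_address);
    }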
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index c260ca4..1ac57b1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -21,6 +21,7 @@
#include "art_method-inl.h"
#include "base/stl_util.h"
#include "base/time_utils.h"
+#include "debugger_interface.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "jit/profiling_info.h"
@@ -215,6 +216,9 @@
uintptr_t allocation = FromCodeToAllocation(code_ptr);
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
const uint8_t* data = method_header->GetNativeGcMap();
+ // Notify native debugger that we are about to remove the code.
+ // It does nothing if we are not using native debugger.
+ DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
if (data != nullptr) {
mspace_free(data_mspace_, const_cast<uint8_t*>(data));
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6b8f17d..c4694ee 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -211,6 +211,7 @@
safe_mode_(false) {
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
+ interpreter::CheckInterpreterAsmConstants();
}
Runtime::~Runtime() {
@@ -1272,11 +1273,14 @@
}
}
{
+ constexpr const char* kOpenJdkLibrary = kIsDebugBuild
+ ? "libopenjdkd.so"
+ : "libopenjdk.so";
std::string error_msg;
- if (!java_vm_->LoadNativeLibrary(env, "libopenjdk.so", nullptr,
+ if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr,
/* is_shared_namespace */ false,
nullptr, nullptr, &error_msg)) {
- LOG(FATAL) << "LoadNativeLibrary failed for \"libopenjdk.so\": " << error_msg;
+ LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
}
}
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 4e62dda..a8b48ee 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -146,7 +146,7 @@
template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
class AllocationTrackingSafeMap : public SafeMap<
- Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
+ Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>> {
};
} // namespace art
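
The one-word fix above matters because std::map's value_type is std::pair<const Key, T>, so a tracking allocator instantiated with std::pair<Key, T> never matches the type the map actually allocates. A quick compile-time check of that standard-library fact:

    #include <map>
    #include <type_traits>
    #include <utility>

    // std::map's element type carries a const key; allocators should be
    // declared against the exact value_type, as the fixed line now does.
    static_assert(std::is_same<std::map<int, long>::value_type,
                               std::pair<const int, long>>::value,
                  "std::map value_type has a const key");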
diff --git a/runtime/stack.h b/runtime/stack.h
index a0c44cb..4fa1a4f 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -184,11 +184,12 @@
}
uint32_t GetDexPC() const {
- return dex_pc_;
+ return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_;
}
void SetDexPC(uint32_t dex_pc) {
dex_pc_ = dex_pc;
+ dex_pc_ptr_ = nullptr;
}
ShadowFrame* GetLink() const {
@@ -206,6 +207,20 @@
return *reinterpret_cast<const int32_t*>(vreg);
}
+ uint32_t* GetVRegAddr(size_t i) {
+ return &vregs_[i];
+ }
+
+ uint32_t* GetShadowRefAddr(size_t i) {
+ DCHECK(HasReferenceArray());
+ DCHECK_LT(i, NumberOfVRegs());
+ return &vregs_[i + NumberOfVRegs()];
+ }
+
+ void SetCodeItem(const DexFile::CodeItem* code_item) {
+ code_item_ = code_item;
+ }
+
float GetVRegFloat(size_t i) const {
DCHECK_LT(i, NumberOfVRegs());
// NOTE: Strict-aliasing?
@@ -346,6 +361,10 @@
return lock_count_data_;
}
+ static size_t LockCountDataOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
+ }
+
static size_t LinkOffset() {
return OFFSETOF_MEMBER(ShadowFrame, link_);
}
@@ -366,6 +385,18 @@
return OFFSETOF_MEMBER(ShadowFrame, vregs_);
}
+ static size_t ResultRegisterOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, result_register_);
+ }
+
+ static size_t DexPCPtrOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
+ }
+
+ static size_t CodeItemOffset() {
+ return OFFSETOF_MEMBER(ShadowFrame, code_item_);
+ }
+
// Create ShadowFrame for interpreter using provided memory.
static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs,
ShadowFrame* link,
@@ -375,10 +406,19 @@
return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
}
+ uint16_t* GetDexPCPtr() {
+ return dex_pc_ptr_;
+ }
+
+ JValue* GetResultRegister() {
+ return result_register_;
+ }
+
private:
ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
uint32_t dex_pc, bool has_reference_array)
- : number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
+ : link_(link), method_(method), result_register_(nullptr), dex_pc_ptr_(nullptr),
+ code_item_(nullptr), number_of_vregs_(num_vregs), dex_pc_(dex_pc) {
// TODO(iam): Remove this parameter, it's an artifact of portable removal
DCHECK(has_reference_array);
if (has_reference_array) {
@@ -399,12 +439,15 @@
const_cast<const ShadowFrame*>(this)->References());
}
- const uint32_t number_of_vregs_;
// Link to previous shadow frame or null.
ShadowFrame* link_;
ArtMethod* method_;
- uint32_t dex_pc_;
+ JValue* result_register_;
+ uint16_t* dex_pc_ptr_;
+ const DexFile::CodeItem* code_item_;
LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
+ const uint32_t number_of_vregs_;
+ uint32_t dex_pc_;
// This is a two-part array:
// - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
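
The field reshuffle above pairs with the GetDexPC change earlier in this diff: mterp keeps dex_pc_ptr_ as a raw pointer into the code item's uint16_t insns array, while dex_pc_ stays the index form used by the switch interpreter. A sketch of how the two views reconcile (fields passed as locals):

    #include <cstdint>

    // dex_pc_ptr points into the code item's uint16_t insns array, so the
    // pointer difference is already in 16-bit code units; no scaling needed.
    uint32_t CurrentDexPc(const uint16_t* dex_pc_ptr,
                          const uint16_t* insns,
                          uint32_t dex_pc_index) {
      return (dex_pc_ptr == nullptr)
          ? dex_pc_index                                // switch interpreter's view
          : static_cast<uint32_t>(dex_pc_ptr - insns);  // mterp's view
    }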
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 13e3774..21241d2 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -76,6 +76,7 @@
#include "verify_object-inl.h"
#include "vmap_table.h"
#include "well_known_classes.h"
+#include "interpreter/interpreter.h"
#if ART_USE_FUTEXES
#include "linux/futex.h"
@@ -686,6 +687,7 @@
RemoveSuspendTrigger();
InitCardTable();
InitTid();
+ interpreter::InitInterpreterTls(this);
#ifdef __ANDROID__
__get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
diff --git a/runtime/thread.h b/runtime/thread.h
index 6cb895c..b25bcb2 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -601,6 +601,24 @@
}
template<size_t pointer_size>
+ static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(
+ OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(
+ OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(
+ OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
+ }
+
+ template<size_t pointer_size>
static ThreadOffset<pointer_size> ExceptionOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
}
@@ -1001,6 +1019,30 @@
void ProtectStack();
bool UnprotectStack();
+ void SetMterpDefaultIBase(void* ibase) {
+ tlsPtr_.mterp_default_ibase = ibase;
+ }
+
+ void SetMterpCurrentIBase(void* ibase) {
+ tlsPtr_.mterp_current_ibase = ibase;
+ }
+
+ void SetMterpAltIBase(void* ibase) {
+ tlsPtr_.mterp_alt_ibase = ibase;
+ }
+
+ const void* GetMterpDefaultIBase() const {
+ return tlsPtr_.mterp_default_ibase;
+ }
+
+ const void* GetMterpCurrentIBase() const {
+ return tlsPtr_.mterp_current_ibase;
+ }
+
+ const void* GetMterpAltIBase() const {
+ return tlsPtr_.mterp_alt_ibase;
+ }
+
void NoteSignalBeingHandled() {
if (tls32_.handling_signal_) {
LOG(FATAL) << "Detected signal while processing a signal";
@@ -1246,6 +1288,7 @@
frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
+ mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr),
thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
thread_local_mark_stack(nullptr) {
@@ -1364,6 +1407,11 @@
uint8_t* thread_local_end;
size_t thread_local_objects;
+ // Mterp jump table bases.
+ void* mterp_current_ibase;
+ void* mterp_default_ibase;
+ void* mterp_alt_ibase;
+
// There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ae18819..fc1a445 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1145,6 +1145,7 @@
void ThreadList::SuspendAllDaemonThreadsForShutdown() {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::thread_list_lock_);
+ size_t daemons_left = 0;
{ // Tell all the daemons it's time to suspend.
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
for (const auto& thread : list_) {
@@ -1153,12 +1154,21 @@
CHECK(thread->IsDaemon()) << *thread;
if (thread != self) {
thread->ModifySuspendCount(self, +1, nullptr, false);
+ ++daemons_left;
}
// We are shutting down the runtime, set the JNI functions of all the JNIEnvs to be
// the sleep forever one.
thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
}
}
+  // If any daemons are left, wait 200ms to reduce the chance of catching them at a point
+  // where they are about to access runtime state but are not yet in a runnable state.
+  // Examples: monitor code, or waking up from a condition variable.
+  // TODO: Find a better way to wait for daemon threads to reach a blocked state.
+ if (daemons_left > 0) {
+ static constexpr size_t kDaemonSleepTime = 200 * 1000;
+ usleep(kDaemonSleepTime);
+ }
// Give the threads a chance to suspend, complaining if they're slow.
bool have_complained = false;
static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
diff --git a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
new file mode 100644
index 0000000..54879fb
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "base/casts.h"
+#include "base/macros.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace {
+
+static volatile std::atomic<bool> vm_was_shutdown(false);
+
+extern "C" JNIEXPORT void JNICALL Java_Main_waitAndCallIntoJniEnv(JNIEnv* env, jclass) {
+ // Wait until the runtime is shutdown.
+ while (!vm_was_shutdown.load()) {
+ usleep(1000);
+ }
+ std::cout << "About to call exception check\n";
+ env->ExceptionCheck();
+ LOG(ERROR) << "Should not be reached!";
+}
+
+// NO_RETURN does not work with extern "C" for target builds.
+extern "C" JNIEXPORT void JNICALL Java_Main_destroyJavaVMAndExit(JNIEnv* env, jclass) {
+ // Fake up the managed stack so we can detach.
+ Thread* const self = Thread::Current();
+ self->SetTopOfStack(nullptr);
+ self->SetTopOfShadowStack(nullptr);
+ JavaVM* vm = down_cast<JNIEnvExt*>(env)->vm;
+ vm->DetachCurrentThread();
+ vm->DestroyJavaVM();
+ vm_was_shutdown.store(true);
+ // Give threads some time to get stuck in ExceptionCheck.
+ usleep(1000000);
+ if (env != nullptr) {
+ // Use env != nullptr to trick noreturn.
+ exit(0);
+ }
+}
+
+} // namespace
+} // namespace art
diff --git a/test/136-daemon-jni-shutdown/expected.txt b/test/136-daemon-jni-shutdown/expected.txt
new file mode 100644
index 0000000..f0b6353
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/expected.txt
@@ -0,0 +1,5 @@
+JNI_OnLoad called
+About to call exception check
+About to call exception check
+About to call exception check
+About to call exception check
diff --git a/test/136-daemon-jni-shutdown/info.txt b/test/136-daemon-jni-shutdown/info.txt
new file mode 100644
index 0000000..06a12df
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/info.txt
@@ -0,0 +1 @@
+Test that daemon threads that call into a JNI env after the runtime is shut down do not crash.
\ No newline at end of file
diff --git a/test/136-daemon-jni-shutdown/src/Main.java b/test/136-daemon-jni-shutdown/src/Main.java
new file mode 100644
index 0000000..6eceb75
--- /dev/null
+++ b/test/136-daemon-jni-shutdown/src/Main.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test that daemon threads that call into a JNI env after the runtime is shut down do not crash.
+ */
+public class Main {
+
+ public final static int THREAD_COUNT = 4;
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
+ for (int i = 0; i < THREAD_COUNT; i++) {
+ Thread t = new Thread(new DaemonRunnable());
+ t.setDaemon(true);
+ t.start();
+ }
+ // Give threads time to start and become stuck in waitAndCallIntoJniEnv.
+ Thread.sleep(1000);
+ destroyJavaVMAndExit();
+ }
+
+ static native void waitAndCallIntoJniEnv();
+ static native void destroyJavaVMAndExit();
+
+ private static class DaemonRunnable implements Runnable {
+ public void run() {
+ for (;;) {
+ waitAndCallIntoJniEnv();
+ }
+ }
+ }
+}
diff --git a/test/137-cfi/run b/test/137-cfi/run
index ecbbbc7..9c567b6 100755
--- a/test/137-cfi/run
+++ b/test/137-cfi/run
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-exec ${RUN} "$@"
+exec ${RUN} "$@" -Xcompiler-option --generate-debug-info
diff --git a/test/561-shared-slowpaths/expected.txt b/test/561-shared-slowpaths/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/561-shared-slowpaths/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/561-shared-slowpaths/info.txt b/test/561-shared-slowpaths/info.txt
new file mode 100644
index 0000000..c51e70b
--- /dev/null
+++ b/test/561-shared-slowpaths/info.txt
@@ -0,0 +1 @@
+Test correctness when slow paths may be shared.
diff --git a/test/561-shared-slowpaths/src/Main.java b/test/561-shared-slowpaths/src/Main.java
new file mode 100644
index 0000000..718b875
--- /dev/null
+++ b/test/561-shared-slowpaths/src/Main.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test correctness in situations where slow paths may be shared
+// (actual sharing may vary between different code generators).
+//
+public class Main {
+
+ // A method with two loops that can be optimized with dynamic BCE,
+  // resulting, for each loop, in a deopt on null, a deopt on lower OOB,
+ // and a deopt on upper OOB.
+ private static void init(int[] x, int [] y, int l1, int h1, int l2, int h2) {
+ for (int i = l1; i < h1; i++) {
+ x[i] = i;
+ }
+ for (int i = l2; i < h2; i++) {
+ y[i] = i;
+ }
+ }
+
+  // Test that each of the six possible exception situations for init()
+  // is correctly handled by the deopt instructions.
+ public static void main(String[] args) {
+ int[] x = new int[100];
+ int[] y = new int[100];
+ int z;
+
+ // All is well.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, 0, 100);
+ } catch (Exception e) {
+ z = 1;
+ }
+ expectEquals(z, 0);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], i);
+ }
+
+ // Null deopt on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(null, y, 0, 100, 0, 100);
+ } catch (NullPointerException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], 0);
+ expectEquals(y[i], 0);
+ }
+
+ // Lower out-of-bounds on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, -1, 100, 0, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], 0);
+ expectEquals(y[i], 0);
+ }
+
+ // Upper out-of-bounds on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 101, 0, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Null deopt on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, null, 0, 100, 0, 100);
+ } catch (NullPointerException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Lower out-of-bounds on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, -1, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Upper out-of-bounds on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, 0, 101);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], i);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void reset(int[] x, int[] y) {
+ for (int i = 0; i < x.length; i++) x[i] = 0;
+ for (int i = 0; i < y.length; i++) y[i] = 0;
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/562-bce-preheader/expected.txt b/test/562-bce-preheader/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/562-bce-preheader/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/562-bce-preheader/info.txt b/test/562-bce-preheader/info.txt
new file mode 100644
index 0000000..ae006ac
--- /dev/null
+++ b/test/562-bce-preheader/info.txt
@@ -0,0 +1 @@
+Regression test for correct placement of hoisting/deopting code.
diff --git a/test/562-bce-preheader/src/Main.java b/test/562-bce-preheader/src/Main.java
new file mode 100644
index 0000000..8de0533
--- /dev/null
+++ b/test/562-bce-preheader/src/Main.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /**
+ * Method with an outer countable loop and an inner do-while loop.
+ * Since all work is done in the header of the inner loop, any invariant hoisting
+ * and deopting should be done in its proper loop preheader, not the true-block
+ * of the newly generated taken-test after dynamic BCE.
+ */
+ public static int doit(int[][] x, int j) {
+ float f = 0;
+ int acc = 0;
+ for (int i = 0; i < 2; i++) {
+ // The full body of a do-while loop is the loop header.
+ do {
+ // Some "noise" to avoid hoisting the array reference
+ // before the dynamic BCE phase runs.
+ f++;
+ // The invariant array reference with corresponding bounds check
+ // is a candidate for hoisting when dynamic BCE runs. If it is
+ // not moved to the proper loop preheader, the wrong values
+ // cause the test to fail.
+ acc += x[i][i];
+ } while (++j < i);
+ }
+ return acc;
+ }
+
+ /**
+ * Single countable loop with a clear header and a loop body. In this case,
+   * after dynamic BCE, some invariant hoisting and deopting must go to the
+ * proper loop preheader and some must go to the true-block.
+ */
+ public static int foo(int[] x, int[] y, int n) {
+ float f = 0;
+ int acc = 0;
+ int i = 0;
+ while (true) {
+ // This part is the loop header.
+ // Some "noise" to avoid hoisting the array reference
+ // before the dynamic BCE phase runs.
+ f++;
+ // The invariant array reference with corresponding bounds check
+ // is a candidate for hoisting when dynamic BCE runs. If it is
+ // not moved to the proper loop preheader, the wrong values
+ // cause the test to fail.
+ acc += y[0];
+ if (++i > n)
+ break;
+ // From here on, this part is the loop body.
+ // The unit-stride array reference is a candidate for dynamic BCE.
+ // The deopting appears in the true-block.
+ acc += x[i];
+ }
+ return acc;
+ }
+
+ public static void main(String args[]) {
+ int[][] x = new int[2][2];
+ int y;
+
+ x[0][0] = 1;
+ x[1][1] = 2;
+
+ expectEquals(8, doit(x, -6));
+ expectEquals(7, doit(x, -5));
+ expectEquals(6, doit(x, -4));
+ expectEquals(5, doit(x, -3));
+ expectEquals(4, doit(x, -2));
+ expectEquals(3, doit(x, -1));
+ expectEquals(3, doit(x, 0));
+ expectEquals(3, doit(x, 1));
+ expectEquals(3, doit(x, 22));
+
+ int a[] = { 1, 2, 3, 5 };
+ int b[] = { 7 };
+
+ expectEquals(7, foo(a, b, -1));
+ expectEquals(7, foo(a, b, 0));
+ expectEquals(16, foo(a, b, 1));
+ expectEquals(26, foo(a, b, 2));
+ expectEquals(38, foo(a, b, 3));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/562-no-intermediate/expected.txt b/test/562-no-intermediate/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/562-no-intermediate/expected.txt
diff --git a/test/562-no-intermediate/info.txt b/test/562-no-intermediate/info.txt
new file mode 100644
index 0000000..4f21aeb
--- /dev/null
+++ b/test/562-no-intermediate/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing, checking that no intermediate
+address is kept live across a Java call.
diff --git a/test/562-no-intermediate/src/Main.java b/test/562-no-intermediate/src/Main.java
new file mode 100644
index 0000000..3b74d6f
--- /dev/null
+++ b/test/562-no-intermediate/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /// CHECK-START-ARM64: int Main.main(String[]) register_allocator (after)
+ /// CHECK-NOT: IntermediateAddress
+ public static void main(String[] args) {
+ array[index] += Math.cos(42);
+ }
+
+ static int index = 0;
+ static double[] array = new double[2];
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index f74a516..b922b45 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -30,6 +30,7 @@
051-thread/thread_test.cc \
117-nopatchoat/nopatchoat.cc \
1337-gc-coverage/gc_coverage.cc \
+ 136-daemon-jni-shutdown/daemon_jni_shutdown.cc \
137-cfi/cfi.cc \
139-register-natives/regnative.cc \
141-class-unload/jni_unload.cc \
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 569a021..ee6b7aa 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -267,16 +267,6 @@
TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
-# b/26483935
-TEST_ART_BROKEN_HOST_RUN_TESTS := \
- 132-daemon-locks-shutdown \
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,host,$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_HOST_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_HOST_RUN_TESTS :=
-
# 143-string-value tests for a LOG(E) tag, which is only supported on host.
TEST_ART_BROKEN_TARGET_RUN_TESTS := \
143-string-value \
@@ -682,7 +672,8 @@
$(ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION)
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION)
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
@@ -690,7 +681,8 @@
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
- $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION)
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION)
endif
# Create a rule to build and run tests following the form:
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 6e1ec49..f2c810a 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -171,13 +171,6 @@
bug: 25437292
},
{
- description: "Assertion failing on the concurrent collector configuration.",
- result: EXEC_FAILED,
- names: ["jsr166.LinkedTransferQueueTest#testTransfer2",
- "jsr166.LinkedTransferQueueTest#testWaitingConsumer"],
- bug: 25883050
-},
-{
description: "Failing tests after enso move.",
result: EXEC_FAILED,
bug: 26326992,
@@ -249,5 +242,22 @@
"org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
"org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
"org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"]
+},
+{
+ description: "libnativehelper_compat_libc++ loading issue",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["dalvik.system.JniTest#testGetSuperclass",
+ "dalvik.system.JniTest#testPassingBooleans",
+ "dalvik.system.JniTest#testPassingBytes",
+ "dalvik.system.JniTest#testPassingChars",
+ "dalvik.system.JniTest#testPassingClass",
+ "dalvik.system.JniTest#testPassingDoubles",
+ "dalvik.system.JniTest#testPassingFloats",
+ "dalvik.system.JniTest#testPassingInts",
+ "dalvik.system.JniTest#testPassingLongs",
+ "dalvik.system.JniTest#testPassingObjectReferences",
+ "dalvik.system.JniTest#testPassingShorts",
+ "dalvik.system.JniTest#testPassingThis"]
}
]
diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt
new file mode 100644
index 0000000..3d25d5f
--- /dev/null
+++ b/tools/libcore_failures_concurrent_collector.txt
@@ -0,0 +1,33 @@
+/*
+ * This file contains expectations for the ART buildbot's concurrent collector
+ * configurations. Its purpose is to temporarily and quickly list failing
+ * tests so as not to break the bots on the CC configurations, until they are
+ * fixed or until the libcore expectation files get properly updated. The
+ * script that uses this file is art/tools/run-libcore-tests.sh.
+ *
+ * It is also used to enable AOSP experiments without messing up CTS's
+ * expectations.
+ */
+
+[
+{
+ description: "Assertion failing on the concurrent collector configuration.",
+ result: EXEC_FAILED,
+ names: ["jsr166.LinkedTransferQueueTest#testTransfer2",
+ "jsr166.LinkedTransferQueueTest#testWaitingConsumer"],
+ bug: 25883050
+},
+{
+ description: "libcore.java.lang.OldSystemTest#test_gc failure on armv8-concurrent-collector.",
+ result: EXEC_FAILED,
+ names: ["libcore.java.lang.OldSystemTest#test_gc"],
+ bug: 26155567
+},
+{
+ description: "TimeoutException on host-x86-concurrent-collector",
+ result: EXEC_FAILED,
+ names: ["libcore.java.util.zip.ZipFileTest#testZipFileWithLotsOfEntries,
+ libcore.java.util.zip.ZipInputStreamTest#testLongMessage"],
+ bug: 26507762
+}
+]
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 11ed8b9..f346239 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -32,6 +32,12 @@
exit 1
fi
+expectations="--expectations art/tools/libcore_failures.txt"
+if [ "x$ART_USE_READ_BARRIER" = xtrue ]; then
+ # Tolerate some more failures on the concurrent collector configurations.
+ expectations="$expectations --expectations art/tools/libcore_failures_concurrent_collector.txt"
+fi
+
emulator="no"
if [ "$ANDROID_SERIAL" = "emulator-5554" ]; then
emulator="yes"
@@ -105,4 +111,4 @@
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args --vm-arg -Xusejit:true --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}
+vogar $vogar_args --vm-arg -Xusejit:true $expectations --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}
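With the snippet above in place, the script passes one --expectations flag per applicable file, so on a read-barrier (concurrent collector) build the final vogar invocation effectively expands to the following (illustrative only; the variables are set earlier in the script):

  # Effective command when ART_USE_READ_BARRIER=true (illustrative):
  vogar $vogar_args --vm-arg -Xusejit:true \
    --expectations art/tools/libcore_failures.txt \
    --expectations art/tools/libcore_failures_concurrent_collector.txt \
    --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}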