Merge "ARM: VIXL32: Implement the functionality needed by intrinsics."
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 59f339a..a37bf4b 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -51,6 +51,7 @@
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/dex_cache-inl.h"
@@ -757,7 +758,8 @@
if (klass->GetStatus() == mirror::Class::kStatusError) {
result = true;
} else {
- CHECK(klass->GetVerifyError() == nullptr) << klass->PrettyClass();
+ ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
+ CHECK(ext.IsNull() || ext->GetVerifyError() == nullptr) << klass->PrettyClass();
}
if (!result) {
// Check interfaces since these wont be visited through VisitReferences.)
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 7cc8b1e..235793d 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -58,22 +58,90 @@
}
/**
- * An upper bound a * (length / a) + b, where a >= 1, can be conservatively rewritten as length + b
- * because length >= 0 is true. This makes it more likely the bound is useful to clients.
+ * Detects an instruction that is >= 0. As long as the value is carried by
+ * a single instruction, arithmetic wrap-around cannot occur.
*/
-static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v) {
- int64_t value;
- if (v.is_known &&
- v.a_constant >= 1 &&
- v.instruction->IsDiv() &&
- v.instruction->InputAt(0)->IsArrayLength() &&
- IsIntAndGet(v.instruction->InputAt(1), &value) && v.a_constant == value) {
- return InductionVarRange::Value(v.instruction->InputAt(0), 1, v.b_constant);
+static bool IsGEZero(HInstruction* instruction) {
+ DCHECK(instruction != nullptr);
+ if (instruction->IsArrayLength()) {
+ return true;
+ } else if (instruction->IsInvokeStaticOrDirect()) {
+ switch (instruction->AsInvoke()->GetIntrinsic()) {
+ case Intrinsics::kMathMinIntInt:
+ case Intrinsics::kMathMinLongLong:
+ // Instruction MIN(>=0, >=0) is >= 0.
+ return IsGEZero(instruction->InputAt(0)) &&
+ IsGEZero(instruction->InputAt(1));
+ case Intrinsics::kMathAbsInt:
+ case Intrinsics::kMathAbsLong:
+ // Instruction ABS(x) is >= 0.
+ return true;
+ default:
+ break;
+ }
+ }
+ int64_t value = -1;
+ return IsIntAndGet(instruction, &value) && value >= 0;
+}
+
+/** Hunts "under the hood" for a suitable instruction at the hint. */
+static bool IsMaxAtHint(
+ HInstruction* instruction, HInstruction* hint, /*out*/HInstruction** suitable) {
+ if (instruction->IsInvokeStaticOrDirect()) {
+ switch (instruction->AsInvoke()->GetIntrinsic()) {
+ case Intrinsics::kMathMinIntInt:
+ case Intrinsics::kMathMinLongLong:
+ // For MIN(x, y), return most suitable x or y as maximum.
+ return IsMaxAtHint(instruction->InputAt(0), hint, suitable) ||
+ IsMaxAtHint(instruction->InputAt(1), hint, suitable);
+ default:
+ break;
+ }
+ } else {
+ *suitable = instruction;
+ while (instruction->IsArrayLength() ||
+ instruction->IsNullCheck() ||
+ instruction->IsNewArray()) {
+ instruction = instruction->InputAt(0);
+ }
+ return instruction == hint;
+ }
+ return false;
+}
+
+/** Post-analysis simplification of a minimum value that makes the bound more useful to clients. */
+static InductionVarRange::Value SimplifyMin(InductionVarRange::Value v) {
+ if (v.is_known && v.a_constant == 1 && v.b_constant <= 0) {
+ // If a == 1, instruction >= 0 and b <= 0, just return the constant b.
+ // No arithmetic wrap-around can occur.
+ if (IsGEZero(v.instruction)) {
+ return InductionVarRange::Value(v.b_constant);
+ }
}
return v;
}
-/** Helper method to test for a constant value. */
+/** Post-analysis simplification of a maximum value that makes the bound more useful to clients. */
+static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v, HInstruction* hint) {
+ if (v.is_known && v.a_constant >= 1) {
+ // An upper bound a * (length / a) + b, where a >= 1, can be conservatively rewritten as
+ // length + b because length >= 0 is true.
+ int64_t value;
+ if (v.instruction->IsDiv() &&
+ v.instruction->InputAt(0)->IsArrayLength() &&
+ IsIntAndGet(v.instruction->InputAt(1), &value) && v.a_constant == value) {
+ return InductionVarRange::Value(v.instruction->InputAt(0), 1, v.b_constant);
+ }
+ // If a == 1, the most suitable one suffices as maximum value.
+ HInstruction* suitable = nullptr;
+ if (v.a_constant == 1 && IsMaxAtHint(v.instruction, hint, &suitable)) {
+ return InductionVarRange::Value(suitable, 1, v.b_constant);
+ }
+ }
+ return v;
+}
+
+/** Tests for a constant value. */
static bool IsConstantValue(InductionVarRange::Value v) {
return v.is_known && v.a_constant == 0;
}
@@ -97,7 +165,7 @@
}
}
-/** Helper method to insert an instruction. */
+/** Inserts an instruction. */
static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
DCHECK(block != nullptr);
DCHECK(block->GetLastInstruction() != nullptr) << block->GetBlockId();
@@ -106,7 +174,7 @@
return instruction;
}
-/** Helper method to obtain loop's control instruction. */
+/** Obtains loop's control instruction. */
static HInstruction* GetLoopControl(HLoopInformation* loop) {
DCHECK(loop != nullptr);
return loop->GetHeader()->GetLastInstruction();
@@ -150,9 +218,14 @@
chase_hint_ = chase_hint;
bool in_body = context->GetBlock() != loop->GetHeader();
int64_t stride_value = 0;
- *min_val = GetVal(info, trip, in_body, /* is_min */ true);
- *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false));
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+ *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint);
*needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
+ chase_hint_ = nullptr;
+ // Retry chasing constants for wrap-around (merge sensitive).
+ if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) {
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+ }
return true;
}
@@ -175,7 +248,7 @@
needs_taken_test)
&& (stride_value == -1 ||
stride_value == 0 ||
- stride_value == 1); // avoid wrap-around anomalies.
+ stride_value == 1); // avoid arithmetic wrap-around anomalies.
}
void InductionVarRange::GenerateRange(HInstruction* context,
@@ -302,7 +375,8 @@
return true;
}
}
- // Try range analysis on the invariant, but only on proper range to avoid wrap-around anomalies.
+ // Try range analysis on the invariant, only accept a proper range
+ // to avoid arithmetic wrap-around anomalies.
Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
if (IsConstantValue(min_val) &&
@@ -450,25 +524,26 @@
HInductionVarAnalysis::InductionInfo* trip,
bool in_body,
bool is_min) const {
- // Stop chasing the instruction at constant or hint.
- int64_t value;
- if (IsIntAndGet(instruction, &value) && CanLongValueFitIntoInt(value)) {
- return Value(static_cast<int32_t>(value));
- } else if (instruction == chase_hint_) {
- return Value(instruction, 1, 0);
- }
- // Special cases when encountering a single instruction that denotes trip count in the
- // loop-body: min is 1 and, when chasing constants, max of safe trip-count is max int
- if (in_body && trip != nullptr && instruction == trip->op_a->fetch) {
+ // Special case when chasing constants: a single instruction that denotes the trip count in
+ // the loop-body is minimally 1 and maximally, with a safe trip-count, max int.
+ if (chase_hint_ == nullptr && in_body && trip != nullptr && instruction == trip->op_a->fetch) {
if (is_min) {
return Value(1);
- } else if (chase_hint_ == nullptr && !IsUnsafeTripCount(trip)) {
+ } else if (!IsUnsafeTripCount(trip)) {
return Value(std::numeric_limits<int32_t>::max());
}
}
- // Chase the instruction a bit deeper into the HIR tree, so that it becomes more likely
- // range analysis will compare the same instructions as terminal nodes.
- if (instruction->IsAdd()) {
+ // Unless at a constant or hint, chase the instruction a bit deeper into the HIR tree, so that
+ // it becomes more likely range analysis will compare the same instructions as terminal nodes.
+ int64_t value;
+ if (IsIntAndGet(instruction, &value) && CanLongValueFitIntoInt(value)) {
+ // Proper constant reveals best information.
+ return Value(static_cast<int32_t>(value));
+ } else if (instruction == chase_hint_) {
+ // At hint, fetch is represented by itself.
+ return Value(instruction, 1, 0);
+ } else if (instruction->IsAdd()) {
+ // Incorporate suitable constants in the chased value.
if (IsIntAndGet(instruction->InputAt(0), &value) && CanLongValueFitIntoInt(value)) {
return AddValue(Value(static_cast<int32_t>(value)),
GetFetch(instruction->InputAt(1), trip, in_body, is_min));
@@ -477,14 +552,14 @@
Value(static_cast<int32_t>(value)));
}
} else if (instruction->IsArrayLength()) {
- // Return extreme values when chasing constants. Otherwise, chase deeper.
+ // Exploit length properties when chasing constants or chase into a new array declaration.
if (chase_hint_ == nullptr) {
return is_min ? Value(0) : Value(std::numeric_limits<int32_t>::max());
} else if (instruction->InputAt(0)->IsNewArray()) {
return GetFetch(instruction->InputAt(0)->InputAt(0), trip, in_body, is_min);
}
} else if (instruction->IsTypeConversion()) {
- // Since analysis is 32-bit (or narrower) we allow a widening along the path.
+ // Since analysis is 32-bit (or narrower), chase beyond widening along the path.
if (instruction->AsTypeConversion()->GetInputType() == Primitive::kPrimInt &&
instruction->AsTypeConversion()->GetResultType() == Primitive::kPrimLong) {
return GetFetch(instruction->InputAt(0), trip, in_body, is_min);
@@ -506,6 +581,7 @@
!IsUnsafeTripCount(next_trip)) {
return GetVal(next_info, next_trip, next_in_body, is_min);
}
+ // Fetch is represented by itself.
return Value(instruction, 1, 0);
}
@@ -870,10 +946,11 @@
HInstruction* opb = nullptr;
switch (info->induction_class) {
case HInductionVarAnalysis::kInvariant:
- // Invariants.
+ // Invariants (note that even though is_min does not impact code generation for
+ // invariants, some effort is made to keep this parameter consistent).
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
- case HInductionVarAnalysis::kXor:
+ case HInductionVarAnalysis::kXor: // no proper is_min for second arg
case HInductionVarAnalysis::kLT:
case HInductionVarAnalysis::kLE:
case HInductionVarAnalysis::kGT:
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index e4d280f..e06fdee 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -111,9 +111,11 @@
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
int simplifications_at_current_position_ = 0;
- // We ensure we do not loop infinitely. The value is a finger in the air guess
- // that should allow enough simplification.
- static constexpr int kMaxSamePositionSimplifications = 10;
+ // We ensure we do not loop infinitely. The value should not be too high, since that
+ // would allow looping around the same basic block too many times. The value should
+ // not be too low either, however, since we want to allow revisiting a basic block
+ // with many statements and simplifications at least once.
+ static constexpr int kMaxSamePositionSimplifications = 50;
};
void InstructionSimplifier::Run() {
@@ -605,11 +607,23 @@
return nullptr;
}
+static bool CmpHasBoolType(HInstruction* input, HInstruction* cmp) {
+ if (input->GetType() == Primitive::kPrimBoolean) {
+ return true; // input has direct boolean type
+ } else if (cmp->GetUses().HasExactlyOneElement()) {
+ // Comparison also has boolean type if both its input and the instruction
+ // itself feed into the same phi node.
+ HInstruction* user = cmp->GetUses().front().GetUser();
+ return user->IsPhi() && user->HasInput(input) && user->HasInput(cmp);
+ }
+ return false;
+}
+
void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) {
HInstruction* input_const = equal->GetConstantRight();
if (input_const != nullptr) {
HInstruction* input_value = equal->GetLeastConstantLeft();
- if (input_value->GetType() == Primitive::kPrimBoolean && input_const->IsIntConstant()) {
+ if (CmpHasBoolType(input_value, equal) && input_const->IsIntConstant()) {
HBasicBlock* block = equal->GetBlock();
// We are comparing the boolean to a constant which is of type int and can
// be any constant.
@@ -619,6 +633,7 @@
block->RemoveInstruction(equal);
RecordSimplification();
} else if (input_const->AsIntConstant()->IsFalse()) {
+ // Replace (bool_value == false) with !bool_value
equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, equal));
block->RemoveInstruction(equal);
RecordSimplification();
@@ -640,11 +655,12 @@
HInstruction* input_const = not_equal->GetConstantRight();
if (input_const != nullptr) {
HInstruction* input_value = not_equal->GetLeastConstantLeft();
- if (input_value->GetType() == Primitive::kPrimBoolean && input_const->IsIntConstant()) {
+ if (CmpHasBoolType(input_value, not_equal) && input_const->IsIntConstant()) {
HBasicBlock* block = not_equal->GetBlock();
// We are comparing the boolean to a constant which is of type int and can
// be any constant.
if (input_const->AsIntConstant()->IsTrue()) {
+ // Replace (bool_value != true) with !bool_value
not_equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, not_equal));
block->RemoveInstruction(not_equal);
RecordSimplification();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6a45149..ce2edde 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1855,6 +1855,15 @@
size_t InputCount() const { return GetInputRecords().size(); }
HInstruction* InputAt(size_t i) const { return InputRecordAt(i).GetInstruction(); }
+ bool HasInput(HInstruction* input) const {
+ for (const HInstruction* i : GetInputs()) {
+ if (i == input) {
+ return true;
+ }
+ }
+ return false;
+ }
+
void SetRawInputAt(size_t index, HInstruction* input) {
SetRawInputRecordAt(index, HUserRecord<HInstruction*>(input));
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 19fd6f9..a484760 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -755,6 +755,8 @@
HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
graph, stats, "dead_code_elimination$initial");
HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
+ graph, stats, "dead_code_elimination$after_inlining");
+ HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
graph, stats, "dead_code_elimination$final");
HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
@@ -795,6 +797,7 @@
select_generator,
fold2, // TODO: if we don't inline we can also skip fold2.
simplify2,
+ dce2,
side_effects,
gvn,
licm,
@@ -804,7 +807,7 @@
fold3, // evaluates code generated by dynamic bce
simplify3,
lse,
- dce2,
+ dce3,
// The codegen has a few assumptions that only the instruction simplifier
// can satisfy. For example, the code generator does not expect to see a
// HTypeConversion from a type to the same type.
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 8f961af..9157907 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -122,6 +122,7 @@
"memory_region.cc",
"mirror/array.cc",
"mirror/class.cc",
+ "mirror/class_ext.cc",
"mirror/dex_cache.cc",
"mirror/emulated_stack_frame.cc",
"mirror/executable.cc",
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 1e5e127..5ef1f06 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -172,7 +172,7 @@
#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
art::mirror::Class::ComponentTypeOffset().Int32Value())
-#define MIRROR_CLASS_IF_TABLE_OFFSET (12 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_IF_TABLE_OFFSET (16 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_IF_TABLE_OFFSET,
art::mirror::Class::IfTableOffset().Int32Value())
#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (64 + MIRROR_OBJECT_HEADER_SIZE)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d3d30d4..cab9d30 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -67,6 +67,7 @@
#include "linear_alloc.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/dex_cache-inl.h"
@@ -136,10 +137,22 @@
return exception_init_method != nullptr;
}
-// Helper for ThrowEarlierClassFailure. Throws the stored error.
-static void HandleEarlierVerifyError(Thread* self, ClassLinker* class_linker, ObjPtr<mirror::Class> c)
+static mirror::Object* GetVerifyError(ObjPtr<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Object> obj = c->GetVerifyError();
+ ObjPtr<mirror::ClassExt> ext(c->GetExtData());
+ if (ext == nullptr) {
+ return nullptr;
+ } else {
+ return ext->GetVerifyError();
+ }
+}
+
+// Helper for ThrowEarlierClassFailure. Throws the stored error.
+static void HandleEarlierVerifyError(Thread* self,
+ ClassLinker* class_linker,
+ ObjPtr<mirror::Class> c)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Object> obj = GetVerifyError(c);
DCHECK(obj != nullptr);
self->AssertNoPendingException();
if (obj->IsClass()) {
@@ -173,8 +186,8 @@
Runtime* const runtime = Runtime::Current();
if (!runtime->IsAotCompiler()) { // Give info if this occurs at runtime.
std::string extra;
- if (c->GetVerifyError() != nullptr) {
- ObjPtr<mirror::Object> verify_error = c->GetVerifyError();
+ if (GetVerifyError(c) != nullptr) {
+ ObjPtr<mirror::Object> verify_error = GetVerifyError(c);
if (verify_error->IsClass()) {
extra = mirror::Class::PrettyDescriptor(verify_error->AsClass());
} else {
@@ -192,11 +205,14 @@
ObjPtr<mirror::Throwable> pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
self->SetException(pre_allocated);
} else {
- if (c->GetVerifyError() != nullptr) {
+ if (GetVerifyError(c) != nullptr) {
// Rethrow stored error.
HandleEarlierVerifyError(self, this, c);
}
- if (c->GetVerifyError() == nullptr || wrap_in_no_class_def) {
+ // TODO This might be wrong if we hit an OOME while allocating the ClassExt. In that case we
+ // might have meant to go down the earlier if statement with the original error but it got
+ // swallowed by the OOM so we end up here.
+ if (GetVerifyError(c) == nullptr || wrap_in_no_class_def) {
// If there isn't a recorded earlier error, or this is a repeat throw from initialization,
// the top-level exception must be a NoClassDefFoundError. The potentially already pending
// exception will be a cause.
@@ -495,6 +511,14 @@
java_lang_DexCache->SetObjectSize(mirror::DexCache::InstanceSize());
mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusResolved, self);
+
+ // Setup dalvik.system.ClassExt
+ Handle<mirror::Class> dalvik_system_ClassExt(hs.NewHandle(
+ AllocClass(self, java_lang_Class.Get(), mirror::ClassExt::ClassSize(image_pointer_size_))));
+ SetClassRoot(kDalvikSystemClassExt, dalvik_system_ClassExt.Get());
+ mirror::ClassExt::SetClass(dalvik_system_ClassExt.Get());
+ mirror::Class::SetStatus(dalvik_system_ClassExt, mirror::Class::kStatusResolved, self);
+
// Set up array classes for string, field, method
Handle<mirror::Class> object_array_string(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(),
@@ -540,7 +564,7 @@
quick_to_interpreter_bridge_trampoline_ = GetQuickToInterpreterBridge();
}
- // Object, String and DexCache need to be rerun through FindSystemClass to finish init
+ // Object, String, ClassExt and DexCache need to be rerun through FindSystemClass to finish init
mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusNotReady, self);
CheckSystemClass(self, java_lang_Object, "Ljava/lang/Object;");
CHECK_EQ(java_lang_Object->GetObjectSize(), mirror::Object::InstanceSize());
@@ -549,6 +573,9 @@
mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusNotReady, self);
CheckSystemClass(self, java_lang_DexCache, "Ljava/lang/DexCache;");
CHECK_EQ(java_lang_DexCache->GetObjectSize(), mirror::DexCache::InstanceSize());
+ mirror::Class::SetStatus(dalvik_system_ClassExt, mirror::Class::kStatusNotReady, self);
+ CheckSystemClass(self, dalvik_system_ClassExt, "Ldalvik/system/ClassExt;");
+ CHECK_EQ(dalvik_system_ClassExt->GetObjectSize(), mirror::ClassExt::InstanceSize());
// Setup the primitive array type classes - can't be done until Object has a vtable.
SetClassRoot(kBooleanArrayClass, FindSystemClass(self, "[Z"));
@@ -1066,6 +1093,7 @@
mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
mirror::EmulatedStackFrame::SetClass(GetClassRoot(kDalvikSystemEmulatedStackFrame));
+ mirror::ClassExt::SetClass(GetClassRoot(kDalvikSystemClassExt));
for (gc::space::ImageSpace* image_space : spaces) {
// Boot class loader, use a null handle.
@@ -2578,6 +2606,8 @@
klass.Assign(GetClassRoot(kJavaLangRefReference));
} else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
klass.Assign(GetClassRoot(kJavaLangDexCache));
+ } else if (strcmp(descriptor, "Ldalvik/system/ClassExt;") == 0) {
+ klass.Assign(GetClassRoot(kDalvikSystemClassExt));
}
}
@@ -8087,6 +8117,7 @@
"[J",
"[S",
"[Ljava/lang/StackTraceElement;",
+ "Ldalvik/system/ClassExt;",
};
static_assert(arraysize(class_roots_descriptors) == size_t(kClassRootsMax),
"Mismatch between class descriptors and class-root enum");
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 4426056..669249f 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -128,6 +128,7 @@
kLongArrayClass,
kShortArrayClass,
kJavaLangStackTraceElementArrayClass,
+ kDalvikSystemClassExt,
kClassRootsMax,
};
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 5878bf3..7a3ebad 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -30,6 +30,7 @@
#include "gc/heap.h"
#include "mirror/accessible_object.h"
#include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
#include "mirror/dex_cache.h"
#include "mirror/emulated_stack_frame.h"
#include "mirror/executable.h"
@@ -586,6 +587,7 @@
addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_), "dexCacheStrings");
addOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex");
addOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, ext_data_), "extData");
addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable");
addOffset(OFFSETOF_MEMBER(mirror::Class, methods_), "methods");
@@ -603,12 +605,17 @@
addOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status");
addOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass");
- addOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_), "verifyError");
addOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_offset_), "virtualMethodsOffset");
addOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable");
};
};
+struct ClassExtOffsets : public CheckOffsets<mirror::ClassExt> {
+ ClassExtOffsets() : CheckOffsets<mirror::ClassExt>(false, "Ldalvik/system/ClassExt;") {
+ addOffset(OFFSETOF_MEMBER(mirror::ClassExt, verify_error_), "verifyError");
+ }
+};
+
struct StringOffsets : public CheckOffsets<mirror::String> {
StringOffsets() : CheckOffsets<mirror::String>(false, "Ljava/lang/String;") {
addOffset(OFFSETOF_MEMBER(mirror::String, count_), "count");
@@ -757,6 +764,7 @@
ScopedObjectAccess soa(Thread::Current());
EXPECT_TRUE(ObjectOffsets().Check());
EXPECT_TRUE(ClassOffsets().Check());
+ EXPECT_TRUE(ClassExtOffsets().Check());
EXPECT_TRUE(StringOffsets().Check());
EXPECT_TRUE(ThrowableOffsets().Check());
EXPECT_TRUE(StackTraceElementOffsets().Check());
diff --git a/runtime/interpreter/mterp/mips/binop.S b/runtime/interpreter/mterp/mips/binop.S
index 66627e2..862d95a 100644
--- a/runtime/interpreter/mterp/mips/binop.S
+++ b/runtime/interpreter/mterp/mips/binop.S
@@ -30,4 +30,3 @@
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
- /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/mips/binop2addr.S b/runtime/interpreter/mterp/mips/binop2addr.S
index 548cbcb..17aa8eb 100644
--- a/runtime/interpreter/mterp/mips/binop2addr.S
+++ b/runtime/interpreter/mterp/mips/binop2addr.S
@@ -25,5 +25,4 @@
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
- /* 10-13 instructions */
+ SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit16.S b/runtime/interpreter/mterp/mips/binopLit16.S
index fc0c9ff..0696e7a 100644
--- a/runtime/interpreter/mterp/mips/binopLit16.S
+++ b/runtime/interpreter/mterp/mips/binopLit16.S
@@ -11,12 +11,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if $chkzero
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -26,5 +25,4 @@
$preinstr # optional op
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
- /* 10-13 instructions */
+ SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit8.S b/runtime/interpreter/mterp/mips/binopLit8.S
index a591408..382dd2b 100644
--- a/runtime/interpreter/mterp/mips/binopLit8.S
+++ b/runtime/interpreter/mterp/mips/binopLit8.S
@@ -12,7 +12,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -28,4 +28,3 @@
$instr # $result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
- /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopWide.S b/runtime/interpreter/mterp/mips/binopWide.S
index 608525b..604134d 100644
--- a/runtime/interpreter/mterp/mips/binopWide.S
+++ b/runtime/interpreter/mterp/mips/binopWide.S
@@ -3,10 +3,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -32,4 +32,3 @@
$instr # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vAA/vAA+1 <- $result0/$result1
- /* 14-17 instructions */
diff --git a/runtime/interpreter/mterp/mips/binopWide2addr.S b/runtime/interpreter/mterp/mips/binopWide2addr.S
index cc92149..f96fdb2 100644
--- a/runtime/interpreter/mterp/mips/binopWide2addr.S
+++ b/runtime/interpreter/mterp/mips/binopWide2addr.S
@@ -3,22 +3,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64($arg2, $arg3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64($arg0, $arg1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64($arg2, $arg3, a1) # a2/a3 <- vB/vB+1
+ LOAD64($arg0, $arg1, t0) # a0/a1 <- vA/vA+1
.if $chkzero
or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -28,6 +27,4 @@
$preinstr # optional op
$instr # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64($result0, $result1, rOBJ) # vAA/vAA+1 <- $result0/$result1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- $result0/$result1
diff --git a/runtime/interpreter/mterp/mips/fbinop.S b/runtime/interpreter/mterp/mips/fbinop.S
index d0d39ae..6c1468c 100644
--- a/runtime/interpreter/mterp/mips/fbinop.S
+++ b/runtime/interpreter/mterp/mips/fbinop.S
@@ -6,7 +6,7 @@
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
srl a3, a0, 8 # a3 <- CC
and a2, a0, 255 # a2 <- BB
GET_VREG_F(fa1, a3) # a1 <- vCC
@@ -14,6 +14,5 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$instr # f0 = result
- SET_VREG_F(fv0, rOBJ) # vAA <- fv0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinop2addr.S b/runtime/interpreter/mterp/mips/fbinop2addr.S
index ccb67b1..2caaf9c 100644
--- a/runtime/interpreter/mterp/mips/fbinop2addr.S
+++ b/runtime/interpreter/mterp/mips/fbinop2addr.S
@@ -1,19 +1,18 @@
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "result = a0 op a1".
+ * that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
+ * div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG_F(fa0, rOBJ)
GET_VREG_F(fa1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$instr
- SET_VREG_F(fv0, rOBJ) # vAA <- result
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/fbinopWide.S b/runtime/interpreter/mterp/mips/fbinopWide.S
index 3be9325..a1fe91e 100644
--- a/runtime/interpreter/mterp/mips/fbinopWide.S
+++ b/runtime/interpreter/mterp/mips/fbinopWide.S
@@ -1,6 +1,6 @@
/*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point binary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* for: add-double, sub-double, mul-double, div-double,
@@ -9,7 +9,7 @@
*/
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
@@ -19,10 +19,5 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
$instr
- SET_VREG64_F(fv0, fv0f, rOBJ)
- b .L${opcode}_finish
-%break
-
-.L${opcode}_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinopWide2addr.S b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
index 8541f11..7303441 100644
--- a/runtime/interpreter/mterp/mips/fbinopWide2addr.S
+++ b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
@@ -1,10 +1,11 @@
/*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point "/2addr" binary operation.
+ * Provide an "instr" line that specifies an instruction that
+ * performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
+ * div-double/2addr, rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -16,6 +17,5 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$instr
- SET_VREG64_F(fv0, fv0f, rOBJ)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/funop.S b/runtime/interpreter/mterp/mips/funop.S
index bfb9346..b2b22c9 100644
--- a/runtime/interpreter/mterp/mips/funop.S
+++ b/runtime/interpreter/mterp/mips/funop.S
@@ -1,18 +1,15 @@
/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * Generic 32-bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
* This could be a MIPS instruction or a function call.
*
- * for: int-to-float, float-to-int
+ * for: int-to-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # t0 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$instr
-
-.L${opcode}_set_vreg_f:
- SET_VREG_F(fv0, rOBJ)
GET_INST_OPCODE(t1) # extract opcode from rINST
- GOTO_OPCODE(t1) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t1) # vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/funopWide.S b/runtime/interpreter/mterp/mips/funopWide.S
deleted file mode 100644
index 3d4cf22..0000000
--- a/runtime/interpreter/mterp/mips/funopWide.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default {"preinstr":"", "ld_arg":"LOAD64_F(fa0, fa0f, a3)", "st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0/a1".
- * This could be a MIPS instruction or a function call.
- *
- * long-to-double, double-to-long
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- $ld_arg
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $preinstr # optional op
- $instr # a0/a1 <- op, a2-a3 changed
-
-.L${opcode}_set_vreg:
- $st_result # vAA <- a0/a1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-13 instructions */
diff --git a/runtime/interpreter/mterp/mips/funopWider.S b/runtime/interpreter/mterp/mips/funopWider.S
index efb85f3..6862e24 100644
--- a/runtime/interpreter/mterp/mips/funopWider.S
+++ b/runtime/interpreter/mterp/mips/funopWider.S
@@ -1,10 +1,8 @@
-%default {"st_result":"SET_VREG64_F(fv0, fv0f, rOBJ)"}
/*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0", where
- * "result" is a 64-bit quantity in a0/a1.
+ * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
*
- * For: int-to-double, float-to-long, float-to-double
+ * For: int-to-double, float-to-double
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -12,8 +10,5 @@
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$instr
-
-.L${opcode}_set_vreg:
- $st_result # vA/vA+1 <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
index a3a6744..0ce7745 100644
--- a/runtime/interpreter/mterp/mips/header.S
+++ b/runtime/interpreter/mterp/mips/header.S
@@ -153,6 +153,58 @@
#define fcc1 $$fcc1
#endif
+#ifdef MIPS32REVGE2
+#define SEB(rd, rt) \
+ seb rd, rt
+#define SEH(rd, rt) \
+ seh rd, rt
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+ ins rd_lo, rt_hi, 16, 16
+#else
+#define SEB(rd, rt) \
+ sll rd, rt, 24; \
+ sra rd, rd, 24
+#define SEH(rd, rt) \
+ sll rd, rt, 16; \
+ sra rd, rd, 16
+/* Clobbers rt_hi on pre-R2. */
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+ sll rt_hi, rt_hi, 16; \
+ or rd_lo, rt_hi
+#endif
+
+#ifdef FPU64
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+ mthc1 r, flo
+#else
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+ mtc1 r, fhi
+#endif
+
+#ifdef MIPS32REVGE6
+#define JR(rt) \
+ jic rt, 0
+#define LSA(rd, rs, rt, sa) \
+ .if sa; \
+ lsa rd, rs, rt, sa; \
+ .else; \
+ addu rd, rs, rt; \
+ .endif
+#else
+#define JR(rt) \
+ jalr zero, rt
+#define LSA(rd, rs, rt, sa) \
+ .if sa; \
+ .set push; \
+ .set noat; \
+ sll AT, rs, sa; \
+ addu rd, AT, rt; \
+ .set pop; \
+ .else; \
+ addu rd, rs, rt; \
+ .endif
+#endif
+
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
* to access other shadow frame fields, we need to use a backwards offset. Define those here.
@@ -186,12 +238,12 @@
sw rPC, OFF_FP_DEX_PC_PTR(rFP)
#define EXPORT_DEX_PC(tmp) \
- lw tmp, OFF_FP_CODE_ITEM(rFP) \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP) \
- addu tmp, CODEITEM_INSNS_OFFSET \
- subu tmp, rPC, tmp \
- sra tmp, tmp, 1 \
- sw tmp, OFF_FP_DEX_PC(rFP)
+ lw tmp, OFF_FP_CODE_ITEM(rFP); \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
+ addu tmp, CODEITEM_INSNS_OFFSET; \
+ subu tmp, rPC, tmp; \
+ sra tmp, tmp, 1; \
+ sw tmp, OFF_FP_DEX_PC(rFP)
/*
* Fetch the next instruction from rPC into rINST. Does not advance rPC.
@@ -206,18 +258,11 @@
* exception catch may miss. (This also implies that it must come after
* EXPORT_PC().)
*/
-#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+#define FETCH_ADVANCE_INST(_count) \
+ lhu rINST, ((_count)*2)(rPC); \
addu rPC, rPC, ((_count) * 2)
/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
- lhu _dreg, ((_count)*2)(_sreg) ; \
- addu _sreg, _sreg, (_count)*2
-
-/*
* Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
* rINST ahead of possible exception point. Be sure to manually advance rPC
* later.
@@ -232,7 +277,8 @@
* rPC to point to the next instruction. "rd" must specify the distance
* in bytes, *not* 16-bit code units, and may be a signed value.
*/
-#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+#define FETCH_ADVANCE_INST_RB(rd) \
+ addu rPC, rPC, rd; \
lhu rINST, (rPC)
/*
@@ -257,38 +303,75 @@
#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
/*
- * Put the prefetched instruction's opcode field into the specified register.
+ * Transform opcode into branch target address.
*/
-#define GET_PREFETCHED_OPCODE(dreg, sreg) andi dreg, sreg, 255
+#define GET_OPCODE_TARGET(rd) \
+ sll rd, rd, ${handler_size_bits}; \
+ addu rd, rIBASE, rd
/*
* Begin executing the opcode in rd.
*/
-#define GOTO_OPCODE(rd) sll rd, rd, ${handler_size_bits}; \
- addu rd, rIBASE, rd; \
- jalr zero, rd
-
-#define GOTO_OPCODE_BASE(_base, rd) sll rd, rd, ${handler_size_bits}; \
- addu rd, _base, rd; \
- jalr zero, rd
+#define GOTO_OPCODE(rd) \
+ GET_OPCODE_TARGET(rd); \
+ JR(rd)
/*
* Get/set the 32-bit value from a Dalvik register.
*/
#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
- .set noat; l.s rd, (AT); .set at
+#define GET_VREG_F(rd, rix) \
+ .set noat; \
+ EAS2(AT, rFP, rix); \
+ l.s rd, (AT); \
+ .set at
-#define SET_VREG(rd, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8)
+#else
+#define SET_VREG(rd, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
sw zero, 0(t8)
+#endif
-#define SET_VREG64(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw rd, 0(t8)
+#else
+#define SET_VREG_OBJECT(rd, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw rd, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64(rlo, rhi, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#else
+#define SET_VREG64(rlo, rhi, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rlo, 0(t8); \
@@ -297,9 +380,39 @@
.set at; \
sw zero, 0(t8); \
sw zero, 4(t8)
+#endif
-#ifdef FPU64
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_F(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ s.s rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8)
+#else
+#define SET_VREG_F(rd, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F(rlo, rhi, rix) \
+ lsa t8, rix, rFP, 2; \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ s.s rlo, 0(t8); \
+ sw AT, 4(t8); \
+ .set at; \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#elif defined(FPU64)
+#define SET_VREG64_F(rlo, rhi, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rREFS, AT; \
sw zero, 0(t8); \
@@ -310,7 +423,8 @@
.set at; \
s.s rlo, 0(t8)
#else
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#define SET_VREG64_F(rlo, rhi, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
s.s rlo, 0(t8); \
@@ -321,18 +435,21 @@
sw zero, 4(t8)
#endif
-#define SET_VREG_OBJECT(rd, rix) .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw rd, 0(t8)
-
/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
- sll dst, dst, ${handler_size_bits}; \
- addu dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
.set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
@@ -342,11 +459,51 @@
jalr zero, dst; \
sw zero, 0(t8); \
.set reorder
+#endif
+
+/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw rd, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ jalr zero, dst; \
+ sw rd, 0(t8); \
+ .set reorder
+#endif
/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
- sll dst, dst, ${handler_size_bits}; \
- addu dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#else
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
.set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
@@ -358,14 +515,82 @@
jalr zero, dst; \
sw zero, 4(t8); \
.set reorder
+#endif
-#define SET_VREG_F(rd, rix) .set noat; \
+/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ s.s rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
s.s rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
- sw zero, 0(t8)
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#endif
+
+/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ s.s rlo, 0(t8); \
+ sw AT, 4(t8); \
+ .set at; \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#elif defined(FPU64)
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rREFS, AT; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8); \
+ addu t8, rFP, AT; \
+ mfhc1 AT, rlo; \
+ sw AT, 4(t8); \
+ .set at; \
+ jalr zero, dst; \
+ s.s rlo, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rlo, 0(t8); \
+ s.s rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#endif
#define GET_OPA(rd) srl rd, rINST, 8
#ifdef MIPS32REVGE2
@@ -376,60 +601,60 @@
#define GET_OPB(rd) srl rd, rINST, 12
/*
- * Form an Effective Address rd = rbase + roff<<n;
- * Uses reg AT
+ * Form an Effective Address rd = rbase + roff<<shift;
+ * Uses reg AT on pre-R6.
*/
-#define EASN(rd, rbase, roff, rshift) .set noat; \
- sll AT, roff, rshift; \
- addu rd, rbase, AT; \
- .set at
+#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-/*
- * Form an Effective Shift Right rd = rbase + roff>>n;
- * Uses reg AT
- */
-#define ESRN(rd, rbase, roff, rshift) .set noat; \
- srl AT, roff, rshift; \
- addu rd, rbase, AT; \
+#define LOAD_eas2(rd, rbase, roff) \
+ .set noat; \
+ EAS2(AT, rbase, roff); \
+ lw rd, 0(AT); \
.set at
-#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
- .set noat; lw rd, 0(AT); .set at
-
-#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
- .set noat; sw rd, 0(AT); .set at
+#define STORE_eas2(rd, rbase, roff) \
+ .set noat; \
+ EAS2(AT, rbase, roff); \
+ sw rd, 0(AT); \
+ .set at
#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+#define STORE64_off(rlo, rhi, rbase, off) \
+ sw rlo, off(rbase); \
sw rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+#define LOAD64_off(rlo, rhi, rbase, off) \
+ lw rlo, off(rbase); \
lw rhi, (off+4)(rbase)
#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+ s.s rlo, off(rbase); \
.set noat; \
mfhc1 AT, rlo; \
sw AT, (off+4)(rbase); \
.set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+ l.s rlo, off(rbase); \
.set noat; \
lw AT, (off+4)(rbase); \
mthc1 AT, rlo; \
.set at
#else
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+ s.s rlo, off(rbase); \
s.s rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+ l.s rlo, off(rbase); \
l.s rhi, (off+4)(rbase)
#endif
@@ -490,3 +715,11 @@
#define REFRESH_IBASE() \
lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN 0x80000000
+#define INT_MIN_AS_FLOAT 0xCF000000
+#define INT_MIN_AS_DOUBLE_HIGH 0xC1E00000
+#define LONG_MIN_HIGH 0x80000000
+#define LONG_MIN_AS_FLOAT 0xDF000000
+#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
index bcd3a57..db3b8af 100644
--- a/runtime/interpreter/mterp/mips/invoke.S
+++ b/runtime/interpreter/mterp/mips/invoke.S
@@ -2,8 +2,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern $helper
EXPORT_PC()
move a0, rSELF
diff --git a/runtime/interpreter/mterp/mips/op_aget.S b/runtime/interpreter/mterp/mips/op_aget.S
index 8aa8992..e88402c 100644
--- a/runtime/interpreter/mterp/mips/op_aget.S
+++ b/runtime/interpreter/mterp/mips/op_aget.S
@@ -19,11 +19,7 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if $shift
EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
# a1 >= a3; compare unsigned index
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips/op_aget_object.S b/runtime/interpreter/mterp/mips/op_aget_object.S
index e3ab9d8..9c49dfe 100644
--- a/runtime/interpreter/mterp/mips/op_aget_object.S
+++ b/runtime/interpreter/mterp/mips/op_aget_object.S
@@ -14,7 +14,6 @@
lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
PREFETCH_INST(2) # load rINST
bnez a1, MterpException
- SET_VREG_OBJECT(v0, rOBJ) # vAA <- v0
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_OBJECT_GOTO(v0, rOBJ, t0) # vAA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_aput.S b/runtime/interpreter/mterp/mips/op_aput.S
index 53d6ae0..46dcaee 100644
--- a/runtime/interpreter/mterp/mips/op_aput.S
+++ b/runtime/interpreter/mterp/mips/op_aput.S
@@ -17,14 +17,11 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if $shift
EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_VREG(a2, rOBJ) # a2 <- vAA
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
$store a2, $data_offset(a0) # vBB[vCC] <- a2
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_wide.S b/runtime/interpreter/mterp/mips/op_aput_wide.S
index ef99261..c3cff56 100644
--- a/runtime/interpreter/mterp/mips/op_aput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_aput_wide.S
@@ -1,7 +1,5 @@
/*
* Array put, 64 bits. vBB[vCC] <- vAA.
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
*/
/* aput-wide vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
@@ -21,5 +19,6 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # a2/a3 <- vBB[vCC]
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_array_length.S b/runtime/interpreter/mterp/mips/op_array_length.S
index 2b4a86f..ae2fe68 100644
--- a/runtime/interpreter/mterp/mips/op_array_length.S
+++ b/runtime/interpreter/mterp/mips/op_array_length.S
@@ -1,6 +1,7 @@
/*
* Return the length of an array.
*/
+ /* array-length vA, vB */
GET_OPB(a1) # a1 <- B
GET_OPA4(a2) # a2 <- A+
GET_VREG(a0, a1) # a0 <- vB (object ref)
diff --git a/runtime/interpreter/mterp/mips/op_check_cast.S b/runtime/interpreter/mterp/mips/op_check_cast.S
index 9a6cefa..3875ce6 100644
--- a/runtime/interpreter/mterp/mips/op_check_cast.S
+++ b/runtime/interpreter/mterp/mips/op_check_cast.S
@@ -1,7 +1,7 @@
/*
* Check to see if a cast from one class to another is allowed.
*/
- # check-cast vAA, class /* BBBB */
+ /* check-cast vAA, class@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_double.S b/runtime/interpreter/mterp/mips/op_cmpg_double.S
index e7965a7..b2e7532 100644
--- a/runtime/interpreter/mterp/mips/op_cmpg_double.S
+++ b/runtime/interpreter/mterp/mips/op_cmpg_double.S
@@ -1 +1 @@
-%include "mips/op_cmpl_double.S" { "naninst":"li rTEMP, 1" }
+%include "mips/op_cmpl_double.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_float.S b/runtime/interpreter/mterp/mips/op_cmpg_float.S
index 53519a6..76550b5 100644
--- a/runtime/interpreter/mterp/mips/op_cmpg_float.S
+++ b/runtime/interpreter/mterp/mips/op_cmpg_float.S
@@ -1 +1 @@
-%include "mips/op_cmpl_float.S" { "naninst":"li rTEMP, 1" }
+%include "mips/op_cmpl_float.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_double.S b/runtime/interpreter/mterp/mips/op_cmpl_double.S
index db89242..369e5b3 100644
--- a/runtime/interpreter/mterp/mips/op_cmpl_double.S
+++ b/runtime/interpreter/mterp/mips/op_cmpl_double.S
@@ -1,53 +1,51 @@
-%default { "naninst":"li rTEMP, -1" }
+%default { "gt_bias":"0" }
/*
* Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register (rTEMP) based on the comparison results.
- *
- * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
- * on what value we'd like to return when one of the operands is NaN.
- *
- * See op_cmpl_float for more details.
+ * into the destination register based on the comparison results.
*
* For: cmpl-double, cmpg-double
*/
/* op vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # s5 <- BB
+ and rOBJ, a0, 255 # rOBJ <- BB
srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[BB]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
EAS2(t0, rFP, t0) # t0 <- &fp[CC]
LOAD64_F(ft0, ft0f, rOBJ)
LOAD64_F(ft1, ft1f, t0)
#ifdef MIPS32REVGE6
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, .L${opcode}_finish
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, .L${opcode}_finish
cmp.eq.d ft2, ft0, ft1
li rTEMP, 0
- bc1nez ft2, .L${opcode}_finish
- b .L${opcode}_nan
-#else
- c.olt.d fcc0, ft0, ft1
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.d ft2, ft0, ft1
li rTEMP, -1
- bc1t fcc0, .L${opcode}_finish
- c.olt.d fcc0, ft1, ft0
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d ft2, ft1, ft0
li rTEMP, 1
- bc1t fcc0, .L${opcode}_finish
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
c.eq.d fcc0, ft0, ft1
li rTEMP, 0
- bc1t fcc0, .L${opcode}_finish
- b .L${opcode}_nan
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ c.olt.d fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.d fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
#endif
-%break
-
-.L${opcode}_nan:
- $naninst
-
-.L${opcode}_finish:
+1:
GET_OPA(rOBJ)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_float.S b/runtime/interpreter/mterp/mips/op_cmpl_float.S
index b8c0961..1dd5506 100644
--- a/runtime/interpreter/mterp/mips/op_cmpl_float.S
+++ b/runtime/interpreter/mterp/mips/op_cmpl_float.S
@@ -1,60 +1,49 @@
-%default { "naninst":"li rTEMP, -1" }
+%default { "gt_bias":"0" }
/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register rTEMP based on the results of the comparison.
- *
- * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
- * on what value we'd like to return when one of the operands is NaN.
- *
- * The operation we're implementing is:
- * if (x == y)
- * return 0;
- * else if (x < y)
- * return -1;
- * else if (x > y)
- * return 1;
- * else
- * return {-1 or 1}; // one or both operands was NaN
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register based on the comparison results.
*
* for: cmpl-float, cmpg-float
*/
/* op vAA, vBB, vCC */
- /* "clasic" form */
FETCH(a0, 1) # a0 <- CCBB
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8
GET_VREG_F(ft0, a2)
GET_VREG_F(ft1, a3)
#ifdef MIPS32REVGE6
- cmp.lt.s ft2, ft0, ft1 # Is ft0 < ft1
- li rTEMP, -1
- bc1nez ft2, .L${opcode}_finish
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, .L${opcode}_finish
cmp.eq.s ft2, ft0, ft1
li rTEMP, 0
- bc1nez ft2, .L${opcode}_finish
- b .L${opcode}_nan
-#else
- c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.s ft2, ft0, ft1
li rTEMP, -1
- bc1t fcc0, .L${opcode}_finish
- c.olt.s fcc0, ft1, ft0
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s ft2, ft1, ft0
li rTEMP, 1
- bc1t fcc0, .L${opcode}_finish
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
c.eq.s fcc0, ft0, ft1
li rTEMP, 0
- bc1t fcc0, .L${opcode}_finish
- b .L${opcode}_nan
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ c.olt.s fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.s fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
#endif
-%break
-
-.L${opcode}_nan:
- $naninst
-
-.L${opcode}_finish:
+1:
GET_OPA(rOBJ)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
diff --git a/runtime/interpreter/mterp/mips/op_const.S b/runtime/interpreter/mterp/mips/op_const.S
index c505761..bd9f873 100644
--- a/runtime/interpreter/mterp/mips/op_const.S
+++ b/runtime/interpreter/mterp/mips/op_const.S
@@ -1,9 +1,8 @@
- # const vAA, /* +BBBBbbbb */
+ /* const vAA, +BBBBbbbb */
GET_OPA(a3) # a3 <- AA
FETCH(a0, 1) # a0 <- bbbb (low)
FETCH(a1, 2) # a1 <- BBBB (high)
FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- sll a1, a1, 16
- or a0, a1, a0 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_16.S b/runtime/interpreter/mterp/mips/op_const_16.S
index 5e47633..2ffb30f 100644
--- a/runtime/interpreter/mterp/mips/op_const_16.S
+++ b/runtime/interpreter/mterp/mips/op_const_16.S
@@ -1,4 +1,4 @@
- # const/16 vAA, /* +BBBB */
+ /* const/16 vAA, +BBBB */
FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
GET_OPA(a3) # a3 <- AA
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips/op_const_4.S b/runtime/interpreter/mterp/mips/op_const_4.S
index 8b662f9..6866c78 100644
--- a/runtime/interpreter/mterp/mips/op_const_4.S
+++ b/runtime/interpreter/mterp/mips/op_const_4.S
@@ -1,4 +1,4 @@
- # const/4 vA, /* +B */
+ /* const/4 vA, +B */
sll a1, rINST, 16 # a1 <- Bxxx0000
GET_OPA(a0) # a0 <- A+
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips/op_const_class.S b/runtime/interpreter/mterp/mips/op_const_class.S
index 7202b11..9adea44 100644
--- a/runtime/interpreter/mterp/mips/op_const_class.S
+++ b/runtime/interpreter/mterp/mips/op_const_class.S
@@ -1,4 +1,4 @@
- # const/class vAA, Class /* BBBB */
+ /* const/class vAA, class@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_const_high16.S b/runtime/interpreter/mterp/mips/op_const_high16.S
index 36c1c35..5162402 100644
--- a/runtime/interpreter/mterp/mips/op_const_high16.S
+++ b/runtime/interpreter/mterp/mips/op_const_high16.S
@@ -1,4 +1,4 @@
- # const/high16 vAA, /* +BBBB0000 */
+ /* const/high16 vAA, +BBBB0000 */
FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
GET_OPA(a3) # a3 <- AA
sll a0, a0, 16 # a0 <- BBBB0000
diff --git a/runtime/interpreter/mterp/mips/op_const_string.S b/runtime/interpreter/mterp/mips/op_const_string.S
index d8eeb46..006e114 100644
--- a/runtime/interpreter/mterp/mips/op_const_string.S
+++ b/runtime/interpreter/mterp/mips/op_const_string.S
@@ -1,4 +1,4 @@
- # const/string vAA, String /* BBBB */
+ /* const/string vAA, string@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
index d732ca1..54cec97 100644
--- a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
+++ b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
@@ -1,10 +1,9 @@
- # const/string vAA, String /* BBBBBBBB */
+ /* const/string vAA, string@BBBBBBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- bbbb (low)
FETCH(a2, 2) # a2 <- BBBB (high)
GET_OPA(a1) # a1 <- AA
- sll a2, a2, 16
- or a0, a0, a2 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
move a3, rSELF
JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
diff --git a/runtime/interpreter/mterp/mips/op_const_wide.S b/runtime/interpreter/mterp/mips/op_const_wide.S
index 01d0f87..f8911e3 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide.S
@@ -1,14 +1,11 @@
- # const-wide vAA, /* +HHHHhhhhBBBBbbbb */
+ /* const-wide vAA, +HHHHhhhhBBBBbbbb */
FETCH(a0, 1) # a0 <- bbbb (low)
FETCH(a1, 2) # a1 <- BBBB (low middle)
FETCH(a2, 3) # a2 <- hhhh (high middle)
- sll a1, 16 #
- or a0, a1 # a0 <- BBBBbbbb (low word)
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb (low word)
FETCH(a3, 4) # a3 <- HHHH (high)
GET_OPA(t1) # t1 <- AA
- sll a3, 16
- or a1, a3, a2 # a1 <- HHHHhhhh (high word)
+ INSERT_HIGH_HALF(a2, a3) # a2 <- HHHHhhhh (high word)
FETCH_ADVANCE_INST(5) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, t1) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a2, t1, t0) # vAA/vAA+1 <- a0/a2
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_16.S b/runtime/interpreter/mterp/mips/op_const_wide_16.S
index 583d9ef..2ca5ab9 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide_16.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide_16.S
@@ -1,8 +1,7 @@
- # const-wide/16 vAA, /* +BBBB */
+ /* const-wide/16 vAA, +BBBB */
FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
GET_OPA(a3) # a3 <- AA
sra a1, a0, 31 # a1 <- ssssssss
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a3) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_32.S b/runtime/interpreter/mterp/mips/op_const_wide_32.S
index 3eb4574..bf802ca 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide_32.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide_32.S
@@ -1,11 +1,9 @@
- # const-wide/32 vAA, /* +BBBBbbbb */
+ /* const-wide/32 vAA, +BBBBbbbb */
FETCH(a0, 1) # a0 <- 0000bbbb (low)
GET_OPA(a3) # a3 <- AA
FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- sll a2, a2, 16
- or a0, a0, a2 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
sra a1, a0, 31 # a1 <- ssssssss
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a3) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_high16.S b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
index 88382c6..04b90fa 100644
--- a/runtime/interpreter/mterp/mips/op_const_wide_high16.S
+++ b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
@@ -1,9 +1,8 @@
- # const-wide/high16 vAA, /* +BBBB000000000000 */
+ /* const-wide/high16 vAA, +BBBB000000000000 */
FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
GET_OPA(a3) # a3 <- AA
li a0, 0 # a0 <- 00000000
sll a1, 16 # a1 <- BBBB0000
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a3) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
index b1792ec..3b44964 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_int.S
@@ -1,58 +1,39 @@
-%include "mips/unopNarrower.S" {"instr":"b d2i_doconv"}
-/*
- * Convert the double in a0/a1 to an int in a0.
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-%break
+ /*
+ * double-to-int
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64_F(fa0, fa0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-d2i_doconv:
+ li t0, INT_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
#ifdef MIPS32REVGE6
- la t0, .LDOUBLE_TO_INT_max
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa1, fa0
- l.s fv0, .LDOUBLE_TO_INT_maxret
- bc1nez ft2, .L${opcode}_set_vreg_f
-
- la t0, .LDOUBLE_TO_INT_min
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa0, fa1
- l.s fv0, .LDOUBLE_TO_INT_minret
- bc1nez ft2, .L${opcode}_set_vreg_f
-
- mov.d fa1, fa0
- cmp.un.d ft2, fa0, fa1
- li.s fv0, 0
- bc1nez ft2, .L${opcode}_set_vreg_f
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ cmp.le.d ft0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
+ cmp.eq.d ft0, fa0, fa0
+ selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
#else
- la t0, .LDOUBLE_TO_INT_max
- LOAD64_F(fa1, fa1f, t0)
c.ole.d fcc0, fa1, fa0
- l.s fv0, .LDOUBLE_TO_INT_maxret
- bc1t .L${opcode}_set_vreg_f
-
- la t0, .LDOUBLE_TO_INT_min
- LOAD64_F(fa1, fa1f, t0)
- c.ole.d fcc0, fa0, fa1
- l.s fv0, .LDOUBLE_TO_INT_minret
- bc1t .L${opcode}_set_vreg_f
-
- mov.d fa1, fa0
- c.un.d fcc0, fa0, fa1
- li.s fv0, 0
- bc1t .L${opcode}_set_vreg_f
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
+ c.eq.d fcc0, fa0, fa0
+ mtc1 zero, fa0
+ MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
+ movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
#endif
-
- trunc.w.d fv0, fa0
- b .L${opcode}_set_vreg_f
-
-.LDOUBLE_TO_INT_max:
- .dword 0x41dfffffffc00000
-.LDOUBLE_TO_INT_min:
- .dword 0xc1e0000000000000 # minint, as a double (high word)
-.LDOUBLE_TO_INT_maxret:
- .word 0x7fffffff
-.LDOUBLE_TO_INT_minret:
- .word 0x80000000
+1:
+ trunc.w.d fa0, fa0
+ SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
index 7f7a799..78d4a8f 100644
--- a/runtime/interpreter/mterp/mips/op_double_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_double_to_long.S
@@ -1,56 +1,61 @@
-%include "mips/funopWide.S" {"instr":"b d2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
+ /*
+ * double-to-long
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64_F(fa0, fa0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+#ifdef MIPS32REVGE6
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, LONG_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ mthc1 t0, fa1
+ cmp.le.d ft0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
+ cmp.eq.d ft0, fa0, fa0
+ selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
+1:
+ trunc.l.d fa0, fa0
+ SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
+#else
+ c.eq.d fcc0, fa0, fa0
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1f fcc0, .L${opcode}_get_opcode
+
+ li t0, LONG_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+ c.ole.d fcc0, fa0, fa1
+ li rRESULT1, LONG_MIN_HIGH
+ bc1t fcc0, .L${opcode}_get_opcode
+
+ neg.d fa1, fa1
+ c.ole.d fcc0, fa1, fa0
+ nor rRESULT0, rRESULT0, zero
+ nor rRESULT1, rRESULT1, zero
+ bc1t fcc0, .L${opcode}_get_opcode
+
+ JAL(__fixdfdi)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ b .L${opcode}_set_vreg
+#endif
%break
-d2l_doconv:
-#ifdef MIPS32REVGE6
- la t0, .LDOUBLE_TO_LONG_max
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa1, fa0
- la t0, .LDOUBLE_TO_LONG_ret_max
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1nez ft2, .L${opcode}_set_vreg
-
- la t0, .LDOUBLE_TO_LONG_min
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa0, fa1
- la t0, .LDOUBLE_TO_LONG_ret_min
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1nez ft2, .L${opcode}_set_vreg
-
- mov.d fa1, fa0
- cmp.un.d ft2, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0
- bc1nez ft2, .L${opcode}_set_vreg
-#else
- la t0, .LDOUBLE_TO_LONG_max
- LOAD64_F(fa1, fa1f, t0)
- c.ole.d fcc0, fa1, fa0
- la t0, .LDOUBLE_TO_LONG_ret_max
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1t .L${opcode}_set_vreg
-
- la t0, .LDOUBLE_TO_LONG_min
- LOAD64_F(fa1, fa1f, t0)
- c.ole.d fcc0, fa0, fa1
- la t0, .LDOUBLE_TO_LONG_ret_min
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1t .L${opcode}_set_vreg
-
- mov.d fa1, fa0
- c.un.d fcc0, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0
- bc1t .L${opcode}_set_vreg
+#ifndef MIPS32REVGE6
+.L${opcode}_get_opcode:
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+.L${opcode}_set_vreg:
+ SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
#endif
- JAL(__fixdfdi)
- b .L${opcode}_set_vreg
-
-.LDOUBLE_TO_LONG_max:
- .dword 0x43e0000000000000 # maxlong, as a double (high word)
-.LDOUBLE_TO_LONG_min:
- .dword 0xc3e0000000000000 # minlong, as a double (high word)
-.LDOUBLE_TO_LONG_ret_max:
- .dword 0x7fffffffffffffff
-.LDOUBLE_TO_LONG_ret_min:
- .dword 0x8000000000000000
diff --git a/runtime/interpreter/mterp/mips/op_fill_array_data.S b/runtime/interpreter/mterp/mips/op_fill_array_data.S
index 8605746..c3cd371 100644
--- a/runtime/interpreter/mterp/mips/op_fill_array_data.S
+++ b/runtime/interpreter/mterp/mips/op_fill_array_data.S
@@ -1,10 +1,9 @@
/* fill-array-data vAA, +BBBBBBBB */
EXPORT_PC()
- FETCH(a0, 1) # a0 <- bbbb (lo)
- FETCH(a1, 2) # a1 <- BBBB (hi)
+ FETCH(a1, 1) # a1 <- bbbb (lo)
+ FETCH(a0, 2) # a0 <- BBBB (hi)
GET_OPA(a3) # a3 <- AA
- sll a1, a1, 16 # a1 <- BBBBbbbb
- or a1, a0, a1 # a1 <- BBBBbbbb
+ INSERT_HIGH_HALF(a1, a0) # a1 <- BBBBbbbb
GET_VREG(a0, a3) # a0 <- vAA (array object)
EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array.S b/runtime/interpreter/mterp/mips/op_filled_new_array.S
index 3f62fae..9511578 100644
--- a/runtime/interpreter/mterp/mips/op_filled_new_array.S
+++ b/runtime/interpreter/mterp/mips/op_filled_new_array.S
@@ -4,8 +4,8 @@
*
* for: filled-new-array, filled-new-array/range
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
.extern $helper
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
index 8292652..087e50f 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_int.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_int.S
@@ -1,50 +1,36 @@
-%include "mips/funop.S" {"instr":"b f2i_doconv"}
-%break
+ /*
+ * float-to-int
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-/*
- * Not an entry point as it is used only once !!
- */
-f2i_doconv:
+ li t0, INT_MIN_AS_FLOAT
+ mtc1 t0, fa1
#ifdef MIPS32REVGE6
- l.s fa1, .LFLOAT_TO_INT_max
- cmp.le.s ft2, fa1, fa0
- l.s fv0, .LFLOAT_TO_INT_ret_max
- bc1nez ft2, .L${opcode}_set_vreg_f
-
- l.s fa1, .LFLOAT_TO_INT_min
- cmp.le.s ft2, fa0, fa1
- l.s fv0, .LFLOAT_TO_INT_ret_min
- bc1nez ft2, .L${opcode}_set_vreg_f
-
- mov.s fa1, fa0
- cmp.un.s ft2, fa0, fa1
- li.s fv0, 0
- bc1nez ft2, .L${opcode}_set_vreg_f
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ cmp.le.s ft0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
+ cmp.eq.s ft0, fa0, fa0
+ selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
#else
- l.s fa1, .LFLOAT_TO_INT_max
c.ole.s fcc0, fa1, fa0
- l.s fv0, .LFLOAT_TO_INT_ret_max
- bc1t .L${opcode}_set_vreg_f
-
- l.s fa1, .LFLOAT_TO_INT_min
- c.ole.s fcc0, fa0, fa1
- l.s fv0, .LFLOAT_TO_INT_ret_min
- bc1t .L${opcode}_set_vreg_f
-
- mov.s fa1, fa0
- c.un.s fcc0, fa0, fa1
- li.s fv0, 0
- bc1t .L${opcode}_set_vreg_f
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
+ c.eq.s fcc0, fa0, fa0
+ mtc1 zero, fa0
+ movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
#endif
-
- trunc.w.s fv0, fa0
- b .L${opcode}_set_vreg_f
-
-.LFLOAT_TO_INT_max:
- .word 0x4f000000
-.LFLOAT_TO_INT_min:
- .word 0xcf000000
-.LFLOAT_TO_INT_ret_max:
- .word 0x7fffffff
-.LFLOAT_TO_INT_ret_min:
- .word 0x80000000
+1:
+ trunc.w.s fa0, fa0
+ SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
index a51384f..dc88a78 100644
--- a/runtime/interpreter/mterp/mips/op_float_to_long.S
+++ b/runtime/interpreter/mterp/mips/op_float_to_long.S
@@ -1,51 +1,58 @@
-%include "mips/funopWider.S" {"instr":"b f2l_doconv", "st_result":"SET_VREG64(rRESULT0, rRESULT1, rOBJ)"}
-%break
+ /*
+ * float-to-long
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-f2l_doconv:
#ifdef MIPS32REVGE6
- l.s fa1, .LLONG_TO_max
- cmp.le.s ft2, fa1, fa0
- li rRESULT0, ~0
- li rRESULT1, ~0x80000000
- bc1nez ft2, .L${opcode}_set_vreg
-
- l.s fa1, .LLONG_TO_min
- cmp.le.s ft2, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0x80000000
- bc1nez ft2, .L${opcode}_set_vreg
-
- mov.s fa1, fa0
- cmp.un.s ft2, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0
- bc1nez ft2, .L${opcode}_set_vreg
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, LONG_MIN_AS_FLOAT
+ mtc1 t0, fa1
+ cmp.le.s ft0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
+ cmp.eq.s ft0, fa0, fa0
+ selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
+1:
+ trunc.l.s fa0, fa0
+ SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
#else
- l.s fa1, .LLONG_TO_max
- c.ole.s fcc0, fa1, fa0
- li rRESULT0, ~0
- li rRESULT1, ~0x80000000
- bc1t .L${opcode}_set_vreg
-
- l.s fa1, .LLONG_TO_min
- c.ole.s fcc0, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0x80000000
- bc1t .L${opcode}_set_vreg
-
- mov.s fa1, fa0
- c.un.s fcc0, fa0, fa1
+ c.eq.s fcc0, fa0, fa0
li rRESULT0, 0
li rRESULT1, 0
- bc1t .L${opcode}_set_vreg
-#endif
+ bc1f fcc0, .L${opcode}_get_opcode
+
+ li t0, LONG_MIN_AS_FLOAT
+ mtc1 t0, fa1
+ c.ole.s fcc0, fa0, fa1
+ li rRESULT1, LONG_MIN_HIGH
+ bc1t fcc0, .L${opcode}_get_opcode
+
+ neg.s fa1, fa1
+ c.ole.s fcc0, fa1, fa0
+ nor rRESULT0, rRESULT0, zero
+ nor rRESULT1, rRESULT1, zero
+ bc1t fcc0, .L${opcode}_get_opcode
JAL(__fixsfdi)
-
+ GET_INST_OPCODE(t1) # extract opcode from rINST
b .L${opcode}_set_vreg
+#endif
+%break
-.LLONG_TO_max:
- .word 0x5f000000
-
-.LLONG_TO_min:
- .word 0xdf000000
+#ifndef MIPS32REVGE6
+.L${opcode}_get_opcode:
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+.L${opcode}_set_vreg:
+ SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
+#endif
diff --git a/runtime/interpreter/mterp/mips/op_goto_32.S b/runtime/interpreter/mterp/mips/op_goto_32.S
index 67f52e9..ef5bf6b 100644
--- a/runtime/interpreter/mterp/mips/op_goto_32.S
+++ b/runtime/interpreter/mterp/mips/op_goto_32.S
@@ -8,8 +8,7 @@
* our "backward branch" test must be "<=0" instead of "<0".
*/
/* goto/32 +AAAAAAAA */
- FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(rINST, 1) # rINST <- aaaa (lo)
FETCH(a1, 2) # a1 <- AAAA (hi)
- sll a1, a1, 16
- or rINST, a0, a1 # rINST <- AAAAaaaa
+ INSERT_HIGH_HALF(rINST, a1) # rINST <- AAAAaaaa
b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
index 86d44fa..01f42d9 100644
--- a/runtime/interpreter/mterp/mips/op_iget.S
+++ b/runtime/interpreter/mterp/mips/op_iget.S
@@ -4,6 +4,7 @@
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -15,11 +16,10 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- .if $is_object
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
- .else
- SET_VREG(v0, a2) # fp[A] <- v0
- .endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ .if $is_object
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+ .else
+ SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
+ .endif
diff --git a/runtime/interpreter/mterp/mips/op_iget_object_quick.S b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
index 31d94b9..95c34d7 100644
--- a/runtime/interpreter/mterp/mips/op_iget_object_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
@@ -9,7 +9,6 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
diff --git a/runtime/interpreter/mterp/mips/op_iget_quick.S b/runtime/interpreter/mterp/mips/op_iget_quick.S
index fbafa5b..46277d3 100644
--- a/runtime/interpreter/mterp/mips/op_iget_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iget_quick.S
@@ -1,6 +1,6 @@
%default { "load":"lw" }
/* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
index 8fe3089..cf5019e 100644
--- a/runtime/interpreter/mterp/mips/op_iget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_iget_wide.S
@@ -3,6 +3,7 @@
*
* for: iget-wide
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field byte offset
GET_OPB(a1) # a1 <- B
@@ -14,7 +15,6 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpException # bail out
- SET_VREG64(v0, v1, a2) # fp[A] <- v0/v1
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, a2, t0) # fp[A] <- v0/v1
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
index 4d2f291..128be57 100644
--- a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
@@ -1,4 +1,4 @@
- # iget-wide-quick vA, vB, offset /* CCCC */
+ /* iget-wide-quick vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
@@ -9,5 +9,4 @@
LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_instance_of.S b/runtime/interpreter/mterp/mips/op_instance_of.S
index d2679bd..706dcf3 100644
--- a/runtime/interpreter/mterp/mips/op_instance_of.S
+++ b/runtime/interpreter/mterp/mips/op_instance_of.S
@@ -4,7 +4,7 @@
* Most common situation is a non-null object, being compared against
* an already-resolved class.
*/
- # instance-of vA, vB, class /* CCCC */
+ /* instance-of vA, vB, class@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- CCCC
GET_OPB(a1) # a1 <- B
diff --git a/runtime/interpreter/mterp/mips/op_int_to_byte.S b/runtime/interpreter/mterp/mips/op_int_to_byte.S
index 77314c62..9266aab 100644
--- a/runtime/interpreter/mterp/mips/op_int_to_byte.S
+++ b/runtime/interpreter/mterp/mips/op_int_to_byte.S
@@ -1 +1 @@
-%include "mips/unop.S" {"preinstr":"sll a0, a0, 24", "instr":"sra a0, a0, 24"}
+%include "mips/unop.S" {"instr":"SEB(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_short.S b/runtime/interpreter/mterp/mips/op_int_to_short.S
index 5649c2a..8749cd8 100644
--- a/runtime/interpreter/mterp/mips/op_int_to_short.S
+++ b/runtime/interpreter/mterp/mips/op_int_to_short.S
@@ -1 +1 @@
-%include "mips/unop.S" {"preinstr":"sll a0, 16", "instr":"sra a0, 16"}
+%include "mips/unop.S" {"instr":"SEH(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_iput.S b/runtime/interpreter/mterp/mips/op_iput.S
index 732a9a4..9133d60 100644
--- a/runtime/interpreter/mterp/mips/op_iput.S
+++ b/runtime/interpreter/mterp/mips/op_iput.S
@@ -4,7 +4,7 @@
*
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
.extern $handler
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
diff --git a/runtime/interpreter/mterp/mips/op_iput_object.S b/runtime/interpreter/mterp/mips/op_iput_object.S
index 6b856e7..cfa56ec 100644
--- a/runtime/interpreter/mterp/mips/op_iput_object.S
+++ b/runtime/interpreter/mterp/mips/op_iput_object.S
@@ -3,7 +3,7 @@
*
* for: iput-object, iput-object-volatile
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
diff --git a/runtime/interpreter/mterp/mips/op_iput_object_quick.S b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
index c3f1526..82044f5 100644
--- a/runtime/interpreter/mterp/mips/op_iput_object_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
@@ -1,5 +1,5 @@
/* For: iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
diff --git a/runtime/interpreter/mterp/mips/op_iput_quick.S b/runtime/interpreter/mterp/mips/op_iput_quick.S
index 0829666..d9753b1 100644
--- a/runtime/interpreter/mterp/mips/op_iput_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iput_quick.S
@@ -1,6 +1,6 @@
%default { "store":"sw" }
/* For: iput-quick, iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
FETCH(a1, 1) # a1 <- field byte offset
@@ -9,6 +9,7 @@
GET_VREG(a0, a2) # a0 <- fp[A]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu t0, a3, a1
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GET_OPCODE_TARGET(t1)
$store a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t1) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide.S b/runtime/interpreter/mterp/mips/op_iput_wide.S
index 6d23f8c..bc3d758 100644
--- a/runtime/interpreter/mterp/mips/op_iput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_iput_wide.S
@@ -1,4 +1,4 @@
- # iput-wide vA, vB, field /* CCCC */
+ /* iput-wide vA, vB, field@CCCC */
.extern artSet64InstanceFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
index 9fdb847..0eb228d 100644
--- a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
@@ -1,4 +1,4 @@
- # iput-wide-quick vA, vB, offset /* CCCC */
+ /* iput-wide-quick vA, vB, offset@CCCC */
GET_OPA4(a0) # a0 <- A(+)
GET_OPB(a1) # a1 <- B
GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
@@ -9,6 +9,7 @@
FETCH(a3, 1) # a3 <- field byte offset
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu a2, a2, a3 # obj.field (64 bits, aligned) <- a0/a1
- STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ GET_OPCODE_TARGET(t0)
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
+ JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_long_to_double.S b/runtime/interpreter/mterp/mips/op_long_to_double.S
index b83aaf4..153f582 100644
--- a/runtime/interpreter/mterp/mips/op_long_to_double.S
+++ b/runtime/interpreter/mterp/mips/op_long_to_double.S
@@ -1 +1,20 @@
-%include "mips/funopWide.S" {"instr":"JAL(__floatdidf)", "ld_arg":"LOAD64(rARG0, rARG1, a3)"}
+ /*
+ * long-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+ LOAD64_F(fv0, fv0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.d.l fv0, fv0
+#else
+ LOAD64(rARG0, rARG1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
+#endif
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- result
diff --git a/runtime/interpreter/mterp/mips/op_long_to_float.S b/runtime/interpreter/mterp/mips/op_long_to_float.S
index 27faba5..dd1ab81 100644
--- a/runtime/interpreter/mterp/mips/op_long_to_float.S
+++ b/runtime/interpreter/mterp/mips/op_long_to_float.S
@@ -1 +1,20 @@
-%include "mips/unopNarrower.S" {"instr":"JAL(__floatdisf)", "load":"LOAD64(rARG0, rARG1, a3)"}
+ /*
+ * long-to-float
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+ LOAD64_F(fv0, fv0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.s.l fv0, fv0
+#else
+ LOAD64(rARG0, rARG1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(__floatdisf)
+#endif
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/op_move.S b/runtime/interpreter/mterp/mips/op_move.S
index 76588ba..547ea3a 100644
--- a/runtime/interpreter/mterp/mips/op_move.S
+++ b/runtime/interpreter/mterp/mips/op_move.S
@@ -7,8 +7,7 @@
GET_VREG(a2, a1) # a2 <- fp[B]
GET_INST_OPCODE(t0) # t0 <- opcode from rINST
.if $is_object
- SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
.else
- SET_VREG(a2, a0) # fp[A] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_16.S b/runtime/interpreter/mterp/mips/op_move_16.S
index f7de6c2..91b7399 100644
--- a/runtime/interpreter/mterp/mips/op_move_16.S
+++ b/runtime/interpreter/mterp/mips/op_move_16.S
@@ -7,8 +7,7 @@
GET_VREG(a2, a1) # a2 <- fp[BBBB]
GET_INST_OPCODE(t0) # extract opcode from rINST
.if $is_object
- SET_VREG_OBJECT(a2, a0) # fp[AAAA] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
.else
- SET_VREG(a2, a0) # fp[AAAA] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_exception.S b/runtime/interpreter/mterp/mips/op_move_exception.S
index f04a035..f1bece7 100644
--- a/runtime/interpreter/mterp/mips/op_move_exception.S
+++ b/runtime/interpreter/mterp/mips/op_move_exception.S
@@ -2,7 +2,8 @@
GET_OPA(a2) # a2 <- AA
lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
+ SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_from16.S b/runtime/interpreter/mterp/mips/op_move_from16.S
index b8be741..90c25c9 100644
--- a/runtime/interpreter/mterp/mips/op_move_from16.S
+++ b/runtime/interpreter/mterp/mips/op_move_from16.S
@@ -7,8 +7,7 @@
GET_VREG(a2, a1) # a2 <- fp[BBBB]
GET_INST_OPCODE(t0) # extract opcode from rINST
.if $is_object
- SET_VREG_OBJECT(a2, a0) # fp[AA] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
.else
- SET_VREG(a2, a0) # fp[AA] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_result.S b/runtime/interpreter/mterp/mips/op_move_result.S
index 315c68e..a4d5bfe 100644
--- a/runtime/interpreter/mterp/mips/op_move_result.S
+++ b/runtime/interpreter/mterp/mips/op_move_result.S
@@ -7,8 +7,7 @@
lw a0, 0(a0) # a0 <- result.i
GET_INST_OPCODE(t0) # extract opcode from rINST
.if $is_object
- SET_VREG_OBJECT(a0, a2) # fp[AA] <- a0
+ SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
.else
- SET_VREG(a0, a2) # fp[AA] <- a0
+ SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
.endif
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_result_wide.S b/runtime/interpreter/mterp/mips/op_move_result_wide.S
index 940c1ff..1259218 100644
--- a/runtime/interpreter/mterp/mips/op_move_result_wide.S
+++ b/runtime/interpreter/mterp/mips/op_move_result_wide.S
@@ -3,6 +3,5 @@
lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
LOAD64(a0, a1, a3) # a0/a1 <- retval.j
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide.S b/runtime/interpreter/mterp/mips/op_move_wide.S
index dd224c3..01d0949 100644
--- a/runtime/interpreter/mterp/mips/op_move_wide.S
+++ b/runtime/interpreter/mterp/mips/op_move_wide.S
@@ -5,6 +5,5 @@
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_16.S b/runtime/interpreter/mterp/mips/op_move_wide_16.S
index d8761eb..587ba04 100644
--- a/runtime/interpreter/mterp/mips/op_move_wide_16.S
+++ b/runtime/interpreter/mterp/mips/op_move_wide_16.S
@@ -5,6 +5,5 @@
EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[AAAA] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AAAA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_from16.S b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
index 2103fa1..5003fbd 100644
--- a/runtime/interpreter/mterp/mips/op_move_wide_from16.S
+++ b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
@@ -5,6 +5,5 @@
EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_mul_long.S b/runtime/interpreter/mterp/mips/op_mul_long.S
index 803bbec..74b049a 100644
--- a/runtime/interpreter/mterp/mips/op_mul_long.S
+++ b/runtime/interpreter/mterp/mips/op_mul_long.S
@@ -39,5 +39,4 @@
.L${opcode}_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(v0, v1, a0) # vAA::vAA+1 <- v0(low) :: v1(high)
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, a0, t0) # vAA/vAA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
index 6950b71..683b055 100644
--- a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
@@ -26,6 +26,4 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t1) # extract opcode from rINST
- # vAA <- v0 (low)
- SET_VREG64(v0, v1, rOBJ) # vAA+1 <- v1 (high)
- GOTO_OPCODE(t1) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, rOBJ, t1) # vA/vA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_new_instance.S b/runtime/interpreter/mterp/mips/op_new_instance.S
index 51a09b2..3c9e83f 100644
--- a/runtime/interpreter/mterp/mips/op_new_instance.S
+++ b/runtime/interpreter/mterp/mips/op_new_instance.S
@@ -1,7 +1,7 @@
/*
* Create a new instance of a class.
*/
- # new-instance vAA, class /* BBBB */
+ /* new-instance vAA, class@BBBB */
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rSELF
diff --git a/runtime/interpreter/mterp/mips/op_packed_switch.S b/runtime/interpreter/mterp/mips/op_packed_switch.S
index ffa4f47..0a1ff98 100644
--- a/runtime/interpreter/mterp/mips/op_packed_switch.S
+++ b/runtime/interpreter/mterp/mips/op_packed_switch.S
@@ -12,8 +12,7 @@
FETCH(a0, 1) # a0 <- bbbb (lo)
FETCH(a1, 2) # a1 <- BBBB (hi)
GET_OPA(a3) # a3 <- AA
- sll t0, a1, 16
- or a0, a0, t0 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
GET_VREG(a1, a3) # a1 <- vAA
EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
JAL($func) # a0 <- code-unit branch offset
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
index 3efcfbb..64ece1e 100644
--- a/runtime/interpreter/mterp/mips/op_sget.S
+++ b/runtime/interpreter/mterp/mips/op_sget.S
@@ -4,7 +4,7 @@
*
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
.extern $helper
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -15,11 +15,10 @@
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
bnez a3, MterpException # bail out
-.if $is_object
- SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
-.else
- SET_VREG(v0, a2) # fp[AA] <- v0
-.endif
ADVANCE(2)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+.if $is_object
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
+.else
+ SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
+.endif
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
index 7aee386..c729250 100644
--- a/runtime/interpreter/mterp/mips/op_sget_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sget_wide.S
@@ -1,7 +1,7 @@
/*
* 64-bit SGET handler.
*/
- # sget-wide vAA, field /* BBBB */
+ /* sget-wide vAA, field@BBBB */
.extern artGet64StaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -12,6 +12,5 @@
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- SET_VREG64(v0, v1, a1) # vAA/vAA+1 <- v0/v1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, a1, t0) # vAA/vAA+1 <- v0/v1
diff --git a/runtime/interpreter/mterp/mips/op_shl_long.S b/runtime/interpreter/mterp/mips/op_shl_long.S
index 0121669..cc08112 100644
--- a/runtime/interpreter/mterp/mips/op_shl_long.S
+++ b/runtime/interpreter/mterp/mips/op_shl_long.S
@@ -24,7 +24,7 @@
srl a0, v1 # alo<- alo >> (32-(shift&31))
sll v1, a1, a2 # rhi<- ahi << (shift&31)
or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
%break
.L${opcode}_finish:
diff --git a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
index 8ce6058..93c5783 100644
--- a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
@@ -7,7 +7,7 @@
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
- LOAD64(a0, a1, t2) # a0/a1 <- vAA/vAA+1
+ LOAD64(a0, a1, t2) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -20,8 +20,8 @@
srl a0, v1 # alo<- alo >> (32-(shift&31))
sll v1, a1, a2 # rhi<- ahi << (shift&31)
or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
%break
.L${opcode}_finish:
- SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
+ SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_long.S b/runtime/interpreter/mterp/mips/op_shr_long.S
index 4c42758..ea032fe 100644
--- a/runtime/interpreter/mterp/mips/op_shr_long.S
+++ b/runtime/interpreter/mterp/mips/op_shr_long.S
@@ -23,7 +23,7 @@
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/VAA+1 <- v0/v0
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/VAA+1 <- v0/v1
%break
.L${opcode}_finish:
diff --git a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
index 3adc085..c805ea4 100644
--- a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
@@ -7,7 +7,7 @@
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t0, rFP, t2) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -19,9 +19,9 @@
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
%break
.L${opcode}_finish:
sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t2, t0) # vAA/vAA+1 <- rlo/rhi
+ SET_VREG64_GOTO(v1, a3, t2, t0) # vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
index ee313b9..7034a0e 100644
--- a/runtime/interpreter/mterp/mips/op_sput.S
+++ b/runtime/interpreter/mterp/mips/op_sput.S
@@ -4,7 +4,7 @@
*
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a3) # a3 <- AA
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
index 1e11466..3b347fc 100644
--- a/runtime/interpreter/mterp/mips/op_sput_wide.S
+++ b/runtime/interpreter/mterp/mips/op_sput_wide.S
@@ -1,7 +1,7 @@
/*
* 64-bit SPUT handler.
*/
- # sput-wide vAA, field /* BBBB */
+ /* sput-wide vAA, field@BBBB */
.extern artSet64IndirectStaticFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
index ccf1f7e..9e93f34 100644
--- a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
+++ b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
@@ -7,7 +7,7 @@
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t0, rFP, t3) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -20,8 +20,8 @@
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
%break
.L${opcode}_finish:
- SET_VREG64_GOTO(v1, zero, t3, t0) # vAA/vAA+1 <- rlo/rhi
+ SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/unop.S b/runtime/interpreter/mterp/mips/unop.S
index 52a8f0a..bc99263 100644
--- a/runtime/interpreter/mterp/mips/unop.S
+++ b/runtime/interpreter/mterp/mips/unop.S
@@ -1,11 +1,11 @@
%default {"preinstr":"", "result0":"a0"}
/*
* Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * specifies an instruction that performs "result0 = op a0".
* This could be a MIPS instruction or a function call.
*
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -15,5 +15,4 @@
$preinstr # optional op
$instr # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO($result0, t0, t1) # vAA <- result0
- /* 9-10 instructions */
+ SET_VREG_GOTO($result0, t0, t1) # vA <- result0
diff --git a/runtime/interpreter/mterp/mips/unopNarrower.S b/runtime/interpreter/mterp/mips/unopNarrower.S
index 9c38bad..0196e27 100644
--- a/runtime/interpreter/mterp/mips/unopNarrower.S
+++ b/runtime/interpreter/mterp/mips/unopNarrower.S
@@ -1,24 +1,16 @@
%default {"load":"LOAD64_F(fa0, fa0f, a3)"}
/*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0/a1", where
- * "result" is a 32-bit quantity in a0.
+ * Generic 64bit-to-32bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
*
- * For: long-to-float, double-to-int, double-to-float
- * If hard floating point support is available, use fa0 as the parameter,
- * except for long-to-float opcode.
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for OP_MOVE.)
+ * For: double-to-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
EAS2(a3, rFP, a3) # a3 <- &fp[B]
$load
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$instr
-
-.L${opcode}_set_vreg_f:
- SET_VREG_F(fv0, rOBJ) # vA <- result0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/unopWide.S b/runtime/interpreter/mterp/mips/unopWide.S
index fd25dff..135d9fa 100644
--- a/runtime/interpreter/mterp/mips/unopWide.S
+++ b/runtime/interpreter/mterp/mips/unopWide.S
@@ -1,7 +1,7 @@
%default {"preinstr":"", "result0":"a0", "result1":"a1"}
/*
* Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0/a1".
+ * specifies an instruction that performs "result0/result1 = op a0/a1".
* This could be MIPS instruction or a function call.
*
* For: neg-long, not-long, neg-double,
@@ -10,11 +10,9 @@
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ LOAD64(a0, a1, a3) # a0/a1 <- vA
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
$preinstr # optional op
$instr # a0/a1 <- op, a2-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64($result0, $result1, rOBJ) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-13 instructions */
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/unopWider.S b/runtime/interpreter/mterp/mips/unopWider.S
index 1c18837..ca888ad 100644
--- a/runtime/interpreter/mterp/mips/unopWider.S
+++ b/runtime/interpreter/mterp/mips/unopWider.S
@@ -1,8 +1,7 @@
%default {"preinstr":"", "result0":"a0", "result1":"a1"}
/*
* Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0", where
- * "result" is a 64-bit quantity in a0/a1.
+ * that specifies an instruction that performs "result0/result1 = op a0".
*
* For: int-to-long
*/
@@ -14,6 +13,4 @@
$preinstr # optional op
$instr # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64($result0, $result1, rOBJ) # vA/vA+1 <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 10-11 instructions */
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index c1ba794..d3b91e2 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -160,6 +160,58 @@
#define fcc1 $fcc1
#endif
+#ifdef MIPS32REVGE2
+#define SEB(rd, rt) \
+ seb rd, rt
+#define SEH(rd, rt) \
+ seh rd, rt
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+ ins rd_lo, rt_hi, 16, 16
+#else
+#define SEB(rd, rt) \
+ sll rd, rt, 24; \
+ sra rd, rd, 24
+#define SEH(rd, rt) \
+ sll rd, rt, 16; \
+ sra rd, rd, 16
+/* Clobbers rt_hi on pre-R2. */
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+ sll rt_hi, rt_hi, 16; \
+ or rd_lo, rt_hi
+#endif
+
+#ifdef FPU64
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+ mthc1 r, flo
+#else
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+ mtc1 r, fhi
+#endif
+
+#ifdef MIPS32REVGE6
+#define JR(rt) \
+ jic rt, 0
+#define LSA(rd, rs, rt, sa) \
+ .if sa; \
+ lsa rd, rs, rt, sa; \
+ .else; \
+ addu rd, rs, rt; \
+ .endif
+#else
+#define JR(rt) \
+ jalr zero, rt
+#define LSA(rd, rs, rt, sa) \
+ .if sa; \
+ .set push; \
+ .set noat; \
+ sll AT, rs, sa; \
+ addu rd, AT, rt; \
+ .set pop; \
+ .else; \
+ addu rd, rs, rt; \
+ .endif
+#endif
+
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
* to access other shadow frame fields, we need to use a backwards offset. Define those here.
@@ -193,12 +245,12 @@
sw rPC, OFF_FP_DEX_PC_PTR(rFP)
#define EXPORT_DEX_PC(tmp) \
- lw tmp, OFF_FP_CODE_ITEM(rFP) \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP) \
- addu tmp, CODEITEM_INSNS_OFFSET \
- subu tmp, rPC, tmp \
- sra tmp, tmp, 1 \
- sw tmp, OFF_FP_DEX_PC(rFP)
+ lw tmp, OFF_FP_CODE_ITEM(rFP); \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
+ addu tmp, CODEITEM_INSNS_OFFSET; \
+ subu tmp, rPC, tmp; \
+ sra tmp, tmp, 1; \
+ sw tmp, OFF_FP_DEX_PC(rFP)
/*
* Fetch the next instruction from rPC into rINST. Does not advance rPC.
@@ -213,18 +265,11 @@
* exception catch may miss. (This also implies that it must come after
* EXPORT_PC().)
*/
-#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+#define FETCH_ADVANCE_INST(_count) \
+ lhu rINST, ((_count)*2)(rPC); \
addu rPC, rPC, ((_count) * 2)
/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
- lhu _dreg, ((_count)*2)(_sreg) ; \
- addu _sreg, _sreg, (_count)*2
-
-/*
* Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
* rINST ahead of possible exception point. Be sure to manually advance rPC
* later.
@@ -239,7 +284,8 @@
* rPC to point to the next instruction. "rd" must specify the distance
* in bytes, *not* 16-bit code units, and may be a signed value.
*/
-#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+#define FETCH_ADVANCE_INST_RB(rd) \
+ addu rPC, rPC, rd; \
lhu rINST, (rPC)
/*
@@ -264,38 +310,75 @@
#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
/*
- * Put the prefetched instruction's opcode field into the specified register.
+ * Transform opcode into branch target address.
*/
-#define GET_PREFETCHED_OPCODE(dreg, sreg) andi dreg, sreg, 255
+#define GET_OPCODE_TARGET(rd) \
+ sll rd, rd, 7; \
+ addu rd, rIBASE, rd
/*
* Begin executing the opcode in rd.
*/
-#define GOTO_OPCODE(rd) sll rd, rd, 7; \
- addu rd, rIBASE, rd; \
- jalr zero, rd
-
-#define GOTO_OPCODE_BASE(_base, rd) sll rd, rd, 7; \
- addu rd, _base, rd; \
- jalr zero, rd
+#define GOTO_OPCODE(rd) \
+ GET_OPCODE_TARGET(rd); \
+ JR(rd)
/*
* Get/set the 32-bit value from a Dalvik register.
*/
#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
- .set noat; l.s rd, (AT); .set at
+#define GET_VREG_F(rd, rix) \
+ .set noat; \
+ EAS2(AT, rFP, rix); \
+ l.s rd, (AT); \
+ .set at
-#define SET_VREG(rd, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8)
+#else
+#define SET_VREG(rd, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
sw zero, 0(t8)
+#endif
-#define SET_VREG64(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw rd, 0(t8)
+#else
+#define SET_VREG_OBJECT(rd, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw rd, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64(rlo, rhi, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#else
+#define SET_VREG64(rlo, rhi, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
sw rlo, 0(t8); \
@@ -304,9 +387,39 @@
.set at; \
sw zero, 0(t8); \
sw zero, 4(t8)
+#endif
-#ifdef FPU64
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_F(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ s.s rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8)
+#else
+#define SET_VREG_F(rd, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F(rlo, rhi, rix) \
+ lsa t8, rix, rFP, 2; \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ s.s rlo, 0(t8); \
+ sw AT, 4(t8); \
+ .set at; \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#elif defined(FPU64)
+#define SET_VREG64_F(rlo, rhi, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rREFS, AT; \
sw zero, 0(t8); \
@@ -317,7 +430,8 @@
.set at; \
s.s rlo, 0(t8)
#else
-#define SET_VREG64_F(rlo, rhi, rix) .set noat; \
+#define SET_VREG64_F(rlo, rhi, rix) \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
s.s rlo, 0(t8); \
@@ -328,18 +442,21 @@
sw zero, 4(t8)
#endif
-#define SET_VREG_OBJECT(rd, rix) .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw rd, 0(t8)
-
/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
- sll dst, dst, 7; \
- addu dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
.set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
@@ -349,11 +466,51 @@
jalr zero, dst; \
sw zero, 0(t8); \
.set reorder
+#endif
+
+/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw rd, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ jalr zero, dst; \
+ sw rd, 0(t8); \
+ .set reorder
+#endif
/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) .set noreorder; \
- sll dst, dst, 7; \
- addu dst, rIBASE, dst; \
+#ifdef MIPS32REVGE6
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#else
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
.set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
@@ -365,14 +522,82 @@
jalr zero, dst; \
sw zero, 4(t8); \
.set reorder
+#endif
-#define SET_VREG_F(rd, rix) .set noat; \
+/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ s.s rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
sll AT, rix, 2; \
addu t8, rFP, AT; \
s.s rd, 0(t8); \
addu t8, rREFS, AT; \
.set at; \
- sw zero, 0(t8)
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#endif
+
+/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ s.s rlo, 0(t8); \
+ sw AT, 4(t8); \
+ .set at; \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#elif defined(FPU64)
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rREFS, AT; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8); \
+ addu t8, rFP, AT; \
+ mfhc1 AT, rlo; \
+ sw AT, 4(t8); \
+ .set at; \
+ jalr zero, dst; \
+ s.s rlo, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rlo, 0(t8); \
+ s.s rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#endif
#define GET_OPA(rd) srl rd, rINST, 8
#ifdef MIPS32REVGE2
@@ -383,60 +608,60 @@
#define GET_OPB(rd) srl rd, rINST, 12
/*
- * Form an Effective Address rd = rbase + roff<<n;
- * Uses reg AT
+ * Form an Effective Address rd = rbase + roff<<shift;
+ * Uses reg AT on pre-R6.
*/
-#define EASN(rd, rbase, roff, rshift) .set noat; \
- sll AT, roff, rshift; \
- addu rd, rbase, AT; \
- .set at
+#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-/*
- * Form an Effective Shift Right rd = rbase + roff>>n;
- * Uses reg AT
- */
-#define ESRN(rd, rbase, roff, rshift) .set noat; \
- srl AT, roff, rshift; \
- addu rd, rbase, AT; \
+#define LOAD_eas2(rd, rbase, roff) \
+ .set noat; \
+ EAS2(AT, rbase, roff); \
+ lw rd, 0(AT); \
.set at
-#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
- .set noat; lw rd, 0(AT); .set at
-
-#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
- .set noat; sw rd, 0(AT); .set at
+#define STORE_eas2(rd, rbase, roff) \
+ .set noat; \
+ EAS2(AT, rbase, roff); \
+ sw rd, 0(AT); \
+ .set at
#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+#define STORE64_off(rlo, rhi, rbase, off) \
+ sw rlo, off(rbase); \
sw rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+#define LOAD64_off(rlo, rhi, rbase, off) \
+ lw rlo, off(rbase); \
lw rhi, (off+4)(rbase)
#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+ s.s rlo, off(rbase); \
.set noat; \
mfhc1 AT, rlo; \
sw AT, (off+4)(rbase); \
.set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+ l.s rlo, off(rbase); \
.set noat; \
lw AT, (off+4)(rbase); \
mthc1 AT, rlo; \
.set at
#else
-#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+ s.s rlo, off(rbase); \
s.s rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+ l.s rlo, off(rbase); \
l.s rhi, (off+4)(rbase)
#endif
@@ -498,6 +723,14 @@
#define REFRESH_IBASE() \
lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN 0x80000000
+#define INT_MIN_AS_FLOAT 0xCF000000
+#define INT_MIN_AS_DOUBLE_HIGH 0xC1E00000
+#define LONG_MIN_HIGH 0x80000000
+#define LONG_MIN_AS_FLOAT 0xDF000000
+#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
+
/* File: mips/entry.S */
/*
* Copyright (C) 2016 The Android Open Source Project
@@ -599,11 +832,10 @@
GET_VREG(a2, a1) # a2 <- fp[B]
GET_INST_OPCODE(t0) # t0 <- opcode from rINST
.if 0
- SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
.else
- SET_VREG(a2, a0) # fp[A] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
@@ -617,11 +849,10 @@
GET_VREG(a2, a1) # a2 <- fp[BBBB]
GET_INST_OPCODE(t0) # extract opcode from rINST
.if 0
- SET_VREG_OBJECT(a2, a0) # fp[AA] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
.else
- SET_VREG(a2, a0) # fp[AA] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
@@ -635,11 +866,10 @@
GET_VREG(a2, a1) # a2 <- fp[BBBB]
GET_INST_OPCODE(t0) # extract opcode from rINST
.if 0
- SET_VREG_OBJECT(a2, a0) # fp[AAAA] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
.else
- SET_VREG(a2, a0) # fp[AAAA] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
@@ -652,9 +882,8 @@
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
/* ------------------------------ */
.balign 128
@@ -667,9 +896,8 @@
EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
/* ------------------------------ */
.balign 128
@@ -682,9 +910,8 @@
EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[AAAA] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AAAA] <- a0/a1
/* ------------------------------ */
.balign 128
@@ -699,11 +926,10 @@
GET_VREG(a2, a1) # a2 <- fp[B]
GET_INST_OPCODE(t0) # t0 <- opcode from rINST
.if 1
- SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
.else
- SET_VREG(a2, a0) # fp[A] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
@@ -719,11 +945,10 @@
GET_VREG(a2, a1) # a2 <- fp[BBBB]
GET_INST_OPCODE(t0) # extract opcode from rINST
.if 1
- SET_VREG_OBJECT(a2, a0) # fp[AA] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
.else
- SET_VREG(a2, a0) # fp[AA] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
@@ -739,11 +964,10 @@
GET_VREG(a2, a1) # a2 <- fp[BBBB]
GET_INST_OPCODE(t0) # extract opcode from rINST
.if 1
- SET_VREG_OBJECT(a2, a0) # fp[AAAA] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
.else
- SET_VREG(a2, a0) # fp[AAAA] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
@@ -758,11 +982,10 @@
lw a0, 0(a0) # a0 <- result.i
GET_INST_OPCODE(t0) # extract opcode from rINST
.if 0
- SET_VREG_OBJECT(a0, a2) # fp[AA] <- a0
+ SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
.else
- SET_VREG(a0, a2) # fp[AA] <- a0
+ SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
@@ -773,9 +996,8 @@
lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
LOAD64(a0, a1, a3) # a0/a1 <- retval.j
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- SET_VREG64(a0, a1, a2) # fp[AA] <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
/* ------------------------------ */
.balign 128
@@ -790,11 +1012,10 @@
lw a0, 0(a0) # a0 <- result.i
GET_INST_OPCODE(t0) # extract opcode from rINST
.if 1
- SET_VREG_OBJECT(a0, a2) # fp[AA] <- a0
+ SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
.else
- SET_VREG(a0, a2) # fp[AA] <- a0
+ SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
@@ -805,10 +1026,11 @@
GET_OPA(a2) # a2 <- AA
lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
+ SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
@@ -899,7 +1121,7 @@
.balign 128
.L_op_const_4: /* 0x12 */
/* File: mips/op_const_4.S */
- # const/4 vA, /* +B */
+ /* const/4 vA, +B */
sll a1, rINST, 16 # a1 <- Bxxx0000
GET_OPA(a0) # a0 <- A+
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
@@ -912,7 +1134,7 @@
.balign 128
.L_op_const_16: /* 0x13 */
/* File: mips/op_const_16.S */
- # const/16 vAA, /* +BBBB */
+ /* const/16 vAA, +BBBB */
FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
GET_OPA(a3) # a3 <- AA
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
@@ -923,13 +1145,12 @@
.balign 128
.L_op_const: /* 0x14 */
/* File: mips/op_const.S */
- # const vAA, /* +BBBBbbbb */
+ /* const vAA, +BBBBbbbb */
GET_OPA(a3) # a3 <- AA
FETCH(a0, 1) # a0 <- bbbb (low)
FETCH(a1, 2) # a1 <- BBBB (high)
FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- sll a1, a1, 16
- or a0, a1, a0 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
@@ -937,7 +1158,7 @@
.balign 128
.L_op_const_high16: /* 0x15 */
/* File: mips/op_const_high16.S */
- # const/high16 vAA, /* +BBBB0000 */
+ /* const/high16 vAA, +BBBB0000 */
FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
GET_OPA(a3) # a3 <- AA
sll a0, a0, 16 # a0 <- BBBB0000
@@ -949,69 +1170,62 @@
.balign 128
.L_op_const_wide_16: /* 0x16 */
/* File: mips/op_const_wide_16.S */
- # const-wide/16 vAA, /* +BBBB */
+ /* const-wide/16 vAA, +BBBB */
FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
GET_OPA(a3) # a3 <- AA
sra a1, a0, 31 # a1 <- ssssssss
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a3) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
/* ------------------------------ */
.balign 128
.L_op_const_wide_32: /* 0x17 */
/* File: mips/op_const_wide_32.S */
- # const-wide/32 vAA, /* +BBBBbbbb */
+ /* const-wide/32 vAA, +BBBBbbbb */
FETCH(a0, 1) # a0 <- 0000bbbb (low)
GET_OPA(a3) # a3 <- AA
FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- sll a2, a2, 16
- or a0, a0, a2 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
sra a1, a0, 31 # a1 <- ssssssss
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a3) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
/* ------------------------------ */
.balign 128
.L_op_const_wide: /* 0x18 */
/* File: mips/op_const_wide.S */
- # const-wide vAA, /* +HHHHhhhhBBBBbbbb */
+ /* const-wide vAA, +HHHHhhhhBBBBbbbb */
FETCH(a0, 1) # a0 <- bbbb (low)
FETCH(a1, 2) # a1 <- BBBB (low middle)
FETCH(a2, 3) # a2 <- hhhh (high middle)
- sll a1, 16 #
- or a0, a1 # a0 <- BBBBbbbb (low word)
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb (low word)
FETCH(a3, 4) # a3 <- HHHH (high)
GET_OPA(t1) # t1 <- AA
- sll a3, 16
- or a1, a3, a2 # a1 <- HHHHhhhh (high word)
+ INSERT_HIGH_HALF(a2, a3) # a2 <- HHHHhhhh (high word)
FETCH_ADVANCE_INST(5) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, t1) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a2, t1, t0) # vAA/vAA+1 <- a0/a2
/* ------------------------------ */
.balign 128
.L_op_const_wide_high16: /* 0x19 */
/* File: mips/op_const_wide_high16.S */
- # const-wide/high16 vAA, /* +BBBB000000000000 */
+ /* const-wide/high16 vAA, +BBBB000000000000 */
FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
GET_OPA(a3) # a3 <- AA
li a0, 0 # a0 <- 00000000
sll a1, 16 # a1 <- BBBB0000
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a3) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
/* ------------------------------ */
.balign 128
.L_op_const_string: /* 0x1a */
/* File: mips/op_const_string.S */
- # const/string vAA, String /* BBBB */
+ /* const/string vAA, string@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
@@ -1028,13 +1242,12 @@
.balign 128
.L_op_const_string_jumbo: /* 0x1b */
/* File: mips/op_const_string_jumbo.S */
- # const/string vAA, String /* BBBBBBBB */
+ /* const/string vAA, string@BBBBBBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- bbbb (low)
FETCH(a2, 2) # a2 <- BBBB (high)
GET_OPA(a1) # a1 <- AA
- sll a2, a2, 16
- or a0, a0, a2 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
move a3, rSELF
JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
@@ -1048,7 +1261,7 @@
.balign 128
.L_op_const_class: /* 0x1c */
/* File: mips/op_const_class.S */
- # const/class vAA, Class /* BBBB */
+ /* const/class vAA, class@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
@@ -1108,7 +1321,7 @@
/*
* Check to see if a cast from one class to another is allowed.
*/
- # check-cast vAA, class /* BBBB */
+ /* check-cast vAA, class@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- BBBB
GET_OPA(a1) # a1 <- AA
@@ -1132,7 +1345,7 @@
* Most common situation is a non-null object, being compared against
* an already-resolved class.
*/
- # instance-of vA, vB, class /* CCCC */
+ /* instance-of vA, vB, class@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- CCCC
GET_OPB(a1) # a1 <- B
@@ -1155,6 +1368,7 @@
/*
* Return the length of an array.
*/
+ /* array-length vA, vB */
GET_OPB(a1) # a1 <- B
GET_OPA4(a2) # a2 <- A+
GET_VREG(a0, a1) # a0 <- vB (object ref)
@@ -1172,7 +1386,7 @@
/*
* Create a new instance of a class.
*/
- # new-instance vAA, class /* BBBB */
+ /* new-instance vAA, class@BBBB */
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rSELF
@@ -1215,8 +1429,8 @@
*
* for: filled-new-array, filled-new-array/range
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
.extern MterpFilledNewArray
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
@@ -1238,8 +1452,8 @@
*
* for: filled-new-array, filled-new-array/range
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
.extern MterpFilledNewArrayRange
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
@@ -1258,11 +1472,10 @@
/* File: mips/op_fill_array_data.S */
/* fill-array-data vAA, +BBBBBBBB */
EXPORT_PC()
- FETCH(a0, 1) # a0 <- bbbb (lo)
- FETCH(a1, 2) # a1 <- BBBB (hi)
+ FETCH(a1, 1) # a1 <- bbbb (lo)
+ FETCH(a0, 2) # a0 <- BBBB (hi)
GET_OPA(a3) # a3 <- AA
- sll a1, a1, 16 # a1 <- BBBBbbbb
- or a1, a0, a1 # a1 <- BBBBbbbb
+ INSERT_HIGH_HALF(a1, a0) # a1 <- BBBBbbbb
GET_VREG(a0, a3) # a0 <- vAA (array object)
EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
@@ -1330,10 +1543,9 @@
* our "backward branch" test must be "<=0" instead of "<0".
*/
/* goto/32 +AAAAAAAA */
- FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(rINST, 1) # rINST <- aaaa (lo)
FETCH(a1, 2) # a1 <- AAAA (hi)
- sll a1, a1, 16
- or rINST, a0, a1 # rINST <- AAAAaaaa
+ INSERT_HIGH_HALF(rINST, a1) # rINST <- AAAAaaaa
b MterpCommonTakenBranchNoFlags
/* ------------------------------ */
@@ -1353,8 +1565,7 @@
FETCH(a0, 1) # a0 <- bbbb (lo)
FETCH(a1, 2) # a1 <- BBBB (hi)
GET_OPA(a3) # a3 <- AA
- sll t0, a1, 16
- or a0, a0, t0 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
GET_VREG(a1, a3) # a1 <- vAA
EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
JAL(MterpDoPackedSwitch) # a0 <- code-unit branch offset
@@ -1379,8 +1590,7 @@
FETCH(a0, 1) # a0 <- bbbb (lo)
FETCH(a1, 2) # a1 <- BBBB (hi)
GET_OPA(a3) # a3 <- AA
- sll t0, a1, 16
- or a0, a0, t0 # a0 <- BBBBbbbb
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
GET_VREG(a1, a3) # a1 <- vAA
EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
JAL(MterpDoSparseSwitch) # a0 <- code-unit branch offset
@@ -1393,55 +1603,54 @@
.L_op_cmpl_float: /* 0x2d */
/* File: mips/op_cmpl_float.S */
/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register rTEMP based on the results of the comparison.
- *
- * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
- * on what value we'd like to return when one of the operands is NaN.
- *
- * The operation we're implementing is:
- * if (x == y)
- * return 0;
- * else if (x < y)
- * return -1;
- * else if (x > y)
- * return 1;
- * else
- * return {-1 or 1}; // one or both operands was NaN
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register based on the comparison results.
*
* for: cmpl-float, cmpg-float
*/
/* op vAA, vBB, vCC */
- /* "clasic" form */
FETCH(a0, 1) # a0 <- CCBB
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8
GET_VREG_F(ft0, a2)
GET_VREG_F(ft1, a3)
#ifdef MIPS32REVGE6
- cmp.lt.s ft2, ft0, ft1 # Is ft0 < ft1
- li rTEMP, -1
- bc1nez ft2, .Lop_cmpl_float_finish
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, .Lop_cmpl_float_finish
cmp.eq.s ft2, ft0, ft1
li rTEMP, 0
- bc1nez ft2, .Lop_cmpl_float_finish
- b .Lop_cmpl_float_nan
-#else
- c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if 0
+ cmp.lt.s ft2, ft0, ft1
li rTEMP, -1
- bc1t fcc0, .Lop_cmpl_float_finish
- c.olt.s fcc0, ft1, ft0
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s ft2, ft1, ft0
li rTEMP, 1
- bc1t fcc0, .Lop_cmpl_float_finish
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
c.eq.s fcc0, ft0, ft1
li rTEMP, 0
- bc1t fcc0, .Lop_cmpl_float_finish
- b .Lop_cmpl_float_nan
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if 0
+ c.olt.s fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.s fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
#endif
+1:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
/* ------------------------------ */
.balign 128
@@ -1449,55 +1658,54 @@
/* File: mips/op_cmpg_float.S */
/* File: mips/op_cmpl_float.S */
/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register rTEMP based on the results of the comparison.
- *
- * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
- * on what value we'd like to return when one of the operands is NaN.
- *
- * The operation we're implementing is:
- * if (x == y)
- * return 0;
- * else if (x < y)
- * return -1;
- * else if (x > y)
- * return 1;
- * else
- * return {-1 or 1}; // one or both operands was NaN
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register based on the comparison results.
*
* for: cmpl-float, cmpg-float
*/
/* op vAA, vBB, vCC */
- /* "clasic" form */
FETCH(a0, 1) # a0 <- CCBB
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8
GET_VREG_F(ft0, a2)
GET_VREG_F(ft1, a3)
#ifdef MIPS32REVGE6
- cmp.lt.s ft2, ft0, ft1 # Is ft0 < ft1
- li rTEMP, -1
- bc1nez ft2, .Lop_cmpg_float_finish
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, .Lop_cmpg_float_finish
cmp.eq.s ft2, ft0, ft1
li rTEMP, 0
- bc1nez ft2, .Lop_cmpg_float_finish
- b .Lop_cmpg_float_nan
-#else
- c.olt.s fcc0, ft0, ft1 # Is ft0 < ft1
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if 1
+ cmp.lt.s ft2, ft0, ft1
li rTEMP, -1
- bc1t fcc0, .Lop_cmpg_float_finish
- c.olt.s fcc0, ft1, ft0
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s ft2, ft1, ft0
li rTEMP, 1
- bc1t fcc0, .Lop_cmpg_float_finish
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
c.eq.s fcc0, ft0, ft1
li rTEMP, 0
- bc1t fcc0, .Lop_cmpg_float_finish
- b .Lop_cmpg_float_nan
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if 1
+ c.olt.s fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.s fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
#endif
+1:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
/* ------------------------------ */
@@ -1506,47 +1714,55 @@
/* File: mips/op_cmpl_double.S */
/*
* Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register (rTEMP) based on the comparison results.
- *
- * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
- * on what value we'd like to return when one of the operands is NaN.
- *
- * See op_cmpl_float for more details.
+ * into the destination register based on the comparison results.
*
* For: cmpl-double, cmpg-double
*/
/* op vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # s5 <- BB
+ and rOBJ, a0, 255 # rOBJ <- BB
srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[BB]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
EAS2(t0, rFP, t0) # t0 <- &fp[CC]
LOAD64_F(ft0, ft0f, rOBJ)
LOAD64_F(ft1, ft1f, t0)
#ifdef MIPS32REVGE6
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, .Lop_cmpl_double_finish
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, .Lop_cmpl_double_finish
cmp.eq.d ft2, ft0, ft1
li rTEMP, 0
- bc1nez ft2, .Lop_cmpl_double_finish
- b .Lop_cmpl_double_nan
-#else
- c.olt.d fcc0, ft0, ft1
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if 0
+ cmp.lt.d ft2, ft0, ft1
li rTEMP, -1
- bc1t fcc0, .Lop_cmpl_double_finish
- c.olt.d fcc0, ft1, ft0
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d ft2, ft1, ft0
li rTEMP, 1
- bc1t fcc0, .Lop_cmpl_double_finish
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
c.eq.d fcc0, ft0, ft1
li rTEMP, 0
- bc1t fcc0, .Lop_cmpl_double_finish
- b .Lop_cmpl_double_nan
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if 0
+ c.olt.d fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.d fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
#endif
+1:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
/* ------------------------------ */
.balign 128
@@ -1555,47 +1771,55 @@
/* File: mips/op_cmpl_double.S */
/*
* Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register (rTEMP) based on the comparison results.
- *
- * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
- * on what value we'd like to return when one of the operands is NaN.
- *
- * See op_cmpl_float for more details.
+ * into the destination register based on the comparison results.
*
* For: cmpl-double, cmpg-double
*/
/* op vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # s5 <- BB
+ and rOBJ, a0, 255 # rOBJ <- BB
srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[BB]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
EAS2(t0, rFP, t0) # t0 <- &fp[CC]
LOAD64_F(ft0, ft0f, rOBJ)
LOAD64_F(ft1, ft1f, t0)
#ifdef MIPS32REVGE6
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, .Lop_cmpg_double_finish
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, .Lop_cmpg_double_finish
cmp.eq.d ft2, ft0, ft1
li rTEMP, 0
- bc1nez ft2, .Lop_cmpg_double_finish
- b .Lop_cmpg_double_nan
-#else
- c.olt.d fcc0, ft0, ft1
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if 1
+ cmp.lt.d ft2, ft0, ft1
li rTEMP, -1
- bc1t fcc0, .Lop_cmpg_double_finish
- c.olt.d fcc0, ft1, ft0
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d ft2, ft1, ft0
li rTEMP, 1
- bc1t fcc0, .Lop_cmpg_double_finish
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
c.eq.d fcc0, ft0, ft1
li rTEMP, 0
- bc1t fcc0, .Lop_cmpg_double_finish
- b .Lop_cmpg_double_nan
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if 1
+ c.olt.d fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.d fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
#endif
+1:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
/* ------------------------------ */
@@ -2015,11 +2239,7 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 2
EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
# a1 >= a3; compare unsigned index
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
@@ -2074,10 +2294,9 @@
lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
PREFETCH_INST(2) # load rINST
bnez a1, MterpException
- SET_VREG_OBJECT(v0, rOBJ) # vAA <- v0
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_OBJECT_GOTO(v0, rOBJ, t0) # vAA <- v0
/* ------------------------------ */
.balign 128
@@ -2104,11 +2323,7 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 0
EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
# a1 >= a3; compare unsigned index
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
@@ -2142,11 +2357,7 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 0
EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
# a1 >= a3; compare unsigned index
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
@@ -2180,11 +2391,7 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 1
EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
# a1 >= a3; compare unsigned index
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
@@ -2218,11 +2425,7 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 1
EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
# a1 >= a3; compare unsigned index
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
@@ -2253,17 +2456,14 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 2
EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_VREG(a2, rOBJ) # a2 <- vAA
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
@@ -2271,8 +2471,6 @@
/* File: mips/op_aput_wide.S */
/*
* Array put, 64 bits. vBB[vCC] <- vAA.
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
*/
/* aput-wide vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
@@ -2292,8 +2490,9 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # a2/a3 <- vBB[vCC]
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
@@ -2337,17 +2536,14 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 0
EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_VREG(a2, rOBJ) # a2 <- vAA
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
/* ------------------------------ */
@@ -2373,17 +2569,14 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 0
EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_VREG(a2, rOBJ) # a2 <- vAA
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
/* ------------------------------ */
@@ -2409,17 +2602,14 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 1
EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_VREG(a2, rOBJ) # a2 <- vAA
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
/* ------------------------------ */
@@ -2445,17 +2635,14 @@
# null array object?
beqz a0, common_errNullObject # yes, bail
LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- .if 1
EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- .else
- addu a0, a0, a1
- .endif
bgeu a1, a3, common_errArrayIndex # index >= length, bail
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_VREG(a2, rOBJ) # a2 <- vAA
GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t0) # jump to next instruction
/* ------------------------------ */
@@ -2467,6 +2654,7 @@
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2478,14 +2666,13 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
- .else
- SET_VREG(v0, a2) # fp[A] <- v0
- .endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ .if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+ .else
+ SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
+ .endif
/* ------------------------------ */
.balign 128
@@ -2496,6 +2683,7 @@
*
* for: iget-wide
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field byte offset
GET_OPB(a1) # a1 <- B
@@ -2507,10 +2695,9 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpException # bail out
- SET_VREG64(v0, v1, a2) # fp[A] <- v0/v1
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, a2, t0) # fp[A] <- v0/v1
/* ------------------------------ */
.balign 128
@@ -2522,6 +2709,7 @@
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2533,14 +2721,13 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- .if 1
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
- .else
- SET_VREG(v0, a2) # fp[A] <- v0
- .endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ .if 1
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+ .else
+ SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
+ .endif
/* ------------------------------ */
@@ -2553,6 +2740,7 @@
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2564,14 +2752,13 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
- .else
- SET_VREG(v0, a2) # fp[A] <- v0
- .endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ .if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+ .else
+ SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
+ .endif
/* ------------------------------ */
@@ -2584,6 +2771,7 @@
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2595,14 +2783,13 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
- .else
- SET_VREG(v0, a2) # fp[A] <- v0
- .endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ .if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+ .else
+ SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
+ .endif
/* ------------------------------ */
@@ -2615,6 +2802,7 @@
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2626,14 +2814,13 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
- .else
- SET_VREG(v0, a2) # fp[A] <- v0
- .endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ .if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+ .else
+ SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
+ .endif
/* ------------------------------ */
@@ -2646,6 +2833,7 @@
*
* for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
*/
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
GET_OPB(a1) # a1 <- B
@@ -2657,14 +2845,13 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
- .else
- SET_VREG(v0, a2) # fp[A] <- v0
- .endif
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ .if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+ .else
+ SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
+ .endif
/* ------------------------------ */
@@ -2676,7 +2863,7 @@
*
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
.extern artSet32InstanceFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
@@ -2696,7 +2883,7 @@
.balign 128
.L_op_iput_wide: /* 0x5a */
/* File: mips/op_iput_wide.S */
- # iput-wide vA, vB, field /* CCCC */
+ /* iput-wide vA, vB, field@CCCC */
.extern artSet64InstanceFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
@@ -2721,7 +2908,7 @@
*
* for: iput-object, iput-object-volatile
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
@@ -2743,7 +2930,7 @@
*
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
.extern artSet8InstanceFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
@@ -2770,7 +2957,7 @@
*
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
.extern artSet8InstanceFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
@@ -2797,7 +2984,7 @@
*
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
.extern artSet16InstanceFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
@@ -2824,7 +3011,7 @@
*
* for: iput, iput-boolean, iput-byte, iput-char, iput-short
*/
- # op vA, vB, field /* CCCC */
+ /* op vA, vB, field@CCCC */
.extern artSet16InstanceFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
@@ -2850,7 +3037,7 @@
*
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
.extern artGet32StaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -2861,14 +3048,13 @@
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
bnez a3, MterpException # bail out
-.if 0
- SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
-.else
- SET_VREG(v0, a2) # fp[AA] <- v0
-.endif
ADVANCE(2)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+.if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
+.else
+ SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
+.endif
/* ------------------------------ */
.balign 128
@@ -2877,7 +3063,7 @@
/*
* 64-bit SGET handler.
*/
- # sget-wide vAA, field /* BBBB */
+ /* sget-wide vAA, field@BBBB */
.extern artGet64StaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -2888,9 +3074,8 @@
bnez a3, MterpException
GET_OPA(a1) # a1 <- AA
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- SET_VREG64(v0, v1, a1) # vAA/vAA+1 <- v0/v1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, a1, t0) # vAA/vAA+1 <- v0/v1
/* ------------------------------ */
.balign 128
@@ -2902,7 +3087,7 @@
*
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
.extern artGetObjStaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -2913,14 +3098,13 @@
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
bnez a3, MterpException # bail out
-.if 1
- SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
-.else
- SET_VREG(v0, a2) # fp[AA] <- v0
-.endif
ADVANCE(2)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+.if 1
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
+.else
+ SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
+.endif
/* ------------------------------ */
@@ -2933,7 +3117,7 @@
*
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
.extern artGetBooleanStaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -2944,14 +3128,13 @@
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
bnez a3, MterpException # bail out
-.if 0
- SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
-.else
- SET_VREG(v0, a2) # fp[AA] <- v0
-.endif
ADVANCE(2)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+.if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
+.else
+ SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
+.endif
/* ------------------------------ */
@@ -2964,7 +3147,7 @@
*
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
.extern artGetByteStaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -2975,14 +3158,13 @@
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
bnez a3, MterpException # bail out
-.if 0
- SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
-.else
- SET_VREG(v0, a2) # fp[AA] <- v0
-.endif
ADVANCE(2)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+.if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
+.else
+ SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
+.endif
/* ------------------------------ */
@@ -2995,7 +3177,7 @@
*
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
.extern artGetCharStaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -3006,14 +3188,13 @@
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
bnez a3, MterpException # bail out
-.if 0
- SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
-.else
- SET_VREG(v0, a2) # fp[AA] <- v0
-.endif
ADVANCE(2)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+.if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
+.else
+ SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
+.endif
/* ------------------------------ */
@@ -3026,7 +3207,7 @@
*
* for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
.extern artGetShortStaticFromCode
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
@@ -3037,14 +3218,13 @@
GET_OPA(a2) # a2 <- AA
PREFETCH_INST(2)
bnez a3, MterpException # bail out
-.if 0
- SET_VREG_OBJECT(v0, a2) # fp[AA] <- v0
-.else
- SET_VREG(v0, a2) # fp[AA] <- v0
-.endif
ADVANCE(2)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+.if 0
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
+.else
+ SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
+.endif
/* ------------------------------ */
@@ -3056,7 +3236,7 @@
*
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a3) # a3 <- AA
@@ -3077,7 +3257,7 @@
/*
* 64-bit SPUT handler.
*/
- # sput-wide vAA, field /* BBBB */
+ /* sput-wide vAA, field@BBBB */
.extern artSet64IndirectStaticFromMterp
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref CCCC
@@ -3123,7 +3303,7 @@
*
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a3) # a3 <- AA
@@ -3148,7 +3328,7 @@
*
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a3) # a3 <- AA
@@ -3173,7 +3353,7 @@
*
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a3) # a3 <- AA
@@ -3198,7 +3378,7 @@
*
* for: sput, sput-boolean, sput-byte, sput-char, sput-short
*/
- # op vAA, field /* BBBB */
+ /* op vAA, field@BBBB */
EXPORT_PC()
FETCH(a0, 1) # a0 <- field ref BBBB
GET_OPA(a3) # a3 <- AA
@@ -3221,8 +3401,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeVirtual
EXPORT_PC()
move a0, rSELF
@@ -3246,8 +3426,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeSuper
EXPORT_PC()
move a0, rSELF
@@ -3271,8 +3451,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeDirect
EXPORT_PC()
move a0, rSELF
@@ -3296,8 +3476,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeStatic
EXPORT_PC()
move a0, rSELF
@@ -3321,8 +3501,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeInterface
EXPORT_PC()
move a0, rSELF
@@ -3360,8 +3540,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeVirtualRange
EXPORT_PC()
move a0, rSELF
@@ -3385,8 +3565,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeSuperRange
EXPORT_PC()
move a0, rSELF
@@ -3410,8 +3590,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeDirectRange
EXPORT_PC()
move a0, rSELF
@@ -3435,8 +3615,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeStaticRange
EXPORT_PC()
move a0, rSELF
@@ -3460,8 +3640,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeInterfaceRange
EXPORT_PC()
move a0, rSELF
@@ -3506,11 +3686,11 @@
/* File: mips/unop.S */
/*
* Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * specifies an instruction that performs "result0 = op a0".
* This could be a MIPS instruction or a function call.
*
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -3520,8 +3700,7 @@
# optional op
negu a0, a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
- /* 9-10 instructions */
+ SET_VREG_GOTO(a0, t0, t1) # vA <- result0
/* ------------------------------ */
@@ -3531,11 +3710,11 @@
/* File: mips/unop.S */
/*
* Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * specifies an instruction that performs "result0 = op a0".
* This could be a MIPS instruction or a function call.
*
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -3545,8 +3724,7 @@
# optional op
not a0, a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
- /* 9-10 instructions */
+ SET_VREG_GOTO(a0, t0, t1) # vA <- result0
/* ------------------------------ */
@@ -3556,7 +3734,7 @@
/* File: mips/unopWide.S */
/*
* Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0/a1".
+ * specifies an instruction that performs "result0/result1 = op a0/a1".
* This could be MIPS instruction or a function call.
*
* For: neg-long, not-long, neg-double,
@@ -3565,14 +3743,12 @@
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ LOAD64(a0, a1, a3) # a0/a1 <- vA
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
negu v0, a0 # optional op
negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0 # a0/a1 <- op, a2-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(v0, v1, rOBJ) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-13 instructions */
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- a0/a1
/* ------------------------------ */
@@ -3582,7 +3758,7 @@
/* File: mips/unopWide.S */
/*
* Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0/a1".
+ * specifies an instruction that performs "result0/result1 = op a0/a1".
* This could be MIPS instruction or a function call.
*
* For: neg-long, not-long, neg-double,
@@ -3591,14 +3767,12 @@
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ LOAD64(a0, a1, a3) # a0/a1 <- vA
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
not a0, a0 # optional op
not a1, a1 # a0/a1 <- op, a2-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, rOBJ) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-13 instructions */
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
/* ------------------------------ */
@@ -3608,11 +3782,11 @@
/* File: mips/unop.S */
/*
* Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * specifies an instruction that performs "result0 = op a0".
* This could be a MIPS instruction or a function call.
*
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -3622,8 +3796,7 @@
# optional op
addu a0, a0, 0x80000000 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
- /* 9-10 instructions */
+ SET_VREG_GOTO(a0, t0, t1) # vA <- result0
/* ------------------------------ */
@@ -3633,7 +3806,7 @@
/* File: mips/unopWide.S */
/*
* Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0/a1".
+ * specifies an instruction that performs "result0/result1 = op a0/a1".
* This could be MIPS instruction or a function call.
*
* For: neg-long, not-long, neg-double,
@@ -3642,14 +3815,12 @@
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ LOAD64(a0, a1, a3) # a0/a1 <- vA
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
# optional op
addu a1, a1, 0x80000000 # a0/a1 <- op, a2-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, rOBJ) # vAA <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-13 instructions */
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
/* ------------------------------ */
@@ -3659,8 +3830,7 @@
/* File: mips/unopWider.S */
/*
* Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0", where
- * "result" is a 64-bit quantity in a0/a1.
+ * that specifies an instruction that performs "result0/result1 = op a0".
*
* For: int-to-long
*/
@@ -3672,9 +3842,7 @@
# optional op
sra a1, a0, 31 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, rOBJ) # vA/vA+1 <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 10-11 instructions */
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
/* ------------------------------ */
@@ -3683,23 +3851,20 @@
/* File: mips/op_int_to_float.S */
/* File: mips/funop.S */
/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * Generic 32-bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
* This could be a MIPS instruction or a function call.
*
- * for: int-to-float, float-to-int
+ * for: int-to-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # t0 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
cvt.s.w fv0, fa0
-
-.Lop_int_to_float_set_vreg_f:
- SET_VREG_F(fv0, rOBJ)
GET_INST_OPCODE(t1) # extract opcode from rINST
- GOTO_OPCODE(t1) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t1) # vA <- fv0
/* ------------------------------ */
@@ -3708,11 +3873,10 @@
/* File: mips/op_int_to_double.S */
/* File: mips/funopWider.S */
/*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0", where
- * "result" is a 64-bit quantity in a0/a1.
+ * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
*
- * For: int-to-double, float-to-long, float-to-double
+ * For: int-to-double, float-to-double
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -3720,11 +3884,8 @@
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
cvt.d.w fv0, fa0
-
-.Lop_int_to_double_set_vreg:
- SET_VREG64_F(fv0, fv0f, rOBJ) # vA/vA+1 <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
/* ------------------------------ */
@@ -3741,120 +3902,157 @@
GET_VREG(a2, a1) # a2 <- fp[B]
GET_INST_OPCODE(t0) # t0 <- opcode from rINST
.if 0
- SET_VREG_OBJECT(a2, a0) # fp[A] <- a2
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
.else
- SET_VREG(a2, a0) # fp[A] <- a2
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
.endif
- GOTO_OPCODE(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
.L_op_long_to_float: /* 0x85 */
/* File: mips/op_long_to_float.S */
-/* File: mips/unopNarrower.S */
/*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0/a1", where
- * "result" is a 32-bit quantity in a0.
- *
- * For: long-to-float, double-to-int, double-to-float
- * If hard floating point support is available, use fa0 as the parameter,
- * except for long-to-float opcode.
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for OP_MOVE.)
+ * long-to-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
EAS2(a3, rFP, a3) # a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+ LOAD64_F(fv0, fv0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.s.l fv0, fv0
+#else
LOAD64(rARG0, rARG1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
JAL(__floatdisf)
+#endif
-.Lop_long_to_float_set_vreg_f:
- SET_VREG_F(fv0, rOBJ) # vA <- result0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
/* ------------------------------ */
.balign 128
.L_op_long_to_double: /* 0x86 */
/* File: mips/op_long_to_double.S */
-/* File: mips/funopWide.S */
/*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0/a1".
- * This could be a MIPS instruction or a function call.
- *
- * long-to-double, double-to-long
+ * long-to-double
*/
/* unop vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
EAS2(a3, rFP, a3) # a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+ LOAD64_F(fv0, fv0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.d.l fv0, fv0
+#else
LOAD64(rARG0, rARG1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
+ JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
+#endif
-.Lop_long_to_double_set_vreg:
- SET_VREG64_F(fv0, fv0f, rOBJ) # vAA <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-13 instructions */
-
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- result
/* ------------------------------ */
.balign 128
.L_op_float_to_int: /* 0x87 */
/* File: mips/op_float_to_int.S */
-/* File: mips/funop.S */
/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
- * This could be a MIPS instruction or a function call.
+ * float-to-int
*
- * for: int-to-float, float-to-int
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # t0 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- b f2i_doconv
-.Lop_float_to_int_set_vreg_f:
- SET_VREG_F(fv0, rOBJ)
+ li t0, INT_MIN_AS_FLOAT
+ mtc1 t0, fa1
+#ifdef MIPS32REVGE6
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ cmp.le.s ft0, fa1, fa0
GET_INST_OPCODE(t1) # extract opcode from rINST
- GOTO_OPCODE(t1) # jump to next instruction
-
+ bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
+ cmp.eq.s ft0, fa0, fa0
+ selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
+#else
+ c.ole.s fcc0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
+ c.eq.s fcc0, fa0, fa0
+ mtc1 zero, fa0
+ movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
+#endif
+1:
+ trunc.w.s fa0, fa0
+ SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
/* ------------------------------ */
.balign 128
.L_op_float_to_long: /* 0x88 */
/* File: mips/op_float_to_long.S */
-/* File: mips/funopWider.S */
/*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0", where
- * "result" is a 64-bit quantity in a0/a1.
+ * float-to-long
*
- * For: int-to-double, float-to-long, float-to-double
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- b f2l_doconv
-.Lop_float_to_long_set_vreg:
- SET_VREG64(rRESULT0, rRESULT1, rOBJ) # vA/vA+1 <- a0/a1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+#ifdef MIPS32REVGE6
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, LONG_MIN_AS_FLOAT
+ mtc1 t0, fa1
+ cmp.le.s ft0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
+ cmp.eq.s ft0, fa0, fa0
+ selnez.s fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_FLOAT : 0
+1:
+ trunc.l.s fa0, fa0
+ SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
+#else
+ c.eq.s fcc0, fa0, fa0
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1f fcc0, .Lop_float_to_long_get_opcode
+ li t0, LONG_MIN_AS_FLOAT
+ mtc1 t0, fa1
+ c.ole.s fcc0, fa0, fa1
+ li rRESULT1, LONG_MIN_HIGH
+ bc1t fcc0, .Lop_float_to_long_get_opcode
+
+ neg.s fa1, fa1
+ c.ole.s fcc0, fa1, fa0
+ nor rRESULT0, rRESULT0, zero
+ nor rRESULT1, rRESULT1, zero
+ bc1t fcc0, .Lop_float_to_long_get_opcode
+
+ JAL(__fixsfdi)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ b .Lop_float_to_long_set_vreg
+#endif
/* ------------------------------ */
.balign 128
@@ -3862,11 +4060,10 @@
/* File: mips/op_float_to_double.S */
/* File: mips/funopWider.S */
/*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0", where
- * "result" is a 64-bit quantity in a0/a1.
+ * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
*
- * For: int-to-double, float-to-long, float-to-double
+ * For: int-to-double, float-to-double
*/
/* unop vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -3874,77 +4071,111 @@
GET_VREG_F(fa0, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
cvt.d.s fv0, fa0
-
-.Lop_float_to_double_set_vreg:
- SET_VREG64_F(fv0, fv0f, rOBJ) # vA/vA+1 <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
/* ------------------------------ */
.balign 128
.L_op_double_to_int: /* 0x8a */
/* File: mips/op_double_to_int.S */
-/* File: mips/unopNarrower.S */
/*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0/a1", where
- * "result" is a 32-bit quantity in a0.
+ * double-to-int
*
- * For: long-to-float, double-to-int, double-to-float
- * If hard floating point support is available, use fa0 as the parameter,
- * except for long-to-float opcode.
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for OP_MOVE.)
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64_F(fa0, fa0f, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- b d2i_doconv
-.Lop_double_to_int_set_vreg_f:
- SET_VREG_F(fv0, rOBJ) # vA <- result0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/*
- * Convert the double in a0/a1 to an int in a0.
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
+ li t0, INT_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+#ifdef MIPS32REVGE6
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ cmp.le.d ft0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1nez ft0, 1f # if INT_MIN <= vB, proceed to truncation
+ cmp.eq.d ft0, fa0, fa0
+ selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
+#else
+ c.ole.d fcc0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
+ c.eq.d fcc0, fa0, fa0
+ mtc1 zero, fa0
+ MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
+ movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
+#endif
+1:
+ trunc.w.d fa0, fa0
+ SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
/* ------------------------------ */
.balign 128
.L_op_double_to_long: /* 0x8b */
/* File: mips/op_double_to_long.S */
-/* File: mips/funopWide.S */
/*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0/a1".
- * This could be a MIPS instruction or a function call.
+ * double-to-long
*
- * long-to-double, double-to-long
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
*/
/* unop vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64_F(fa0, fa0f, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- b d2l_doconv # a0/a1 <- op, a2-a3 changed
-.Lop_double_to_long_set_vreg:
- SET_VREG64(rRESULT0, rRESULT1, rOBJ) # vAA <- a0/a1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-13 instructions */
+#ifdef MIPS32REVGE6
+ /*
+ * TODO: simplify this when the MIPS64R6 emulator
+ * supports NAN2008=1.
+ */
+ li t0, LONG_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ mthc1 t0, fa1
+ cmp.le.d ft0, fa1, fa0
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ bc1nez ft0, 1f # if LONG_MIN <= vB, proceed to truncation
+ cmp.eq.d ft0, fa0, fa0
+ selnez.d fa0, fa1, ft0 # fa0 = ordered(vB) ? LONG_MIN_AS_DOUBLE : 0
+1:
+ trunc.l.d fa0, fa0
+ SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
+#else
+ c.eq.d fcc0, fa0, fa0
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1f fcc0, .Lop_double_to_long_get_opcode
+ li t0, LONG_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+ c.ole.d fcc0, fa0, fa1
+ li rRESULT1, LONG_MIN_HIGH
+ bc1t fcc0, .Lop_double_to_long_get_opcode
+
+ neg.d fa1, fa1
+ c.ole.d fcc0, fa1, fa0
+ nor rRESULT0, rRESULT0, zero
+ nor rRESULT1, rRESULT1, zero
+ bc1t fcc0, .Lop_double_to_long_get_opcode
+
+ JAL(__fixdfdi)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ b .Lop_double_to_long_set_vreg
+#endif
/* ------------------------------ */
.balign 128
@@ -3952,28 +4183,20 @@
/* File: mips/op_double_to_float.S */
/* File: mips/unopNarrower.S */
/*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op a0/a1", where
- * "result" is a 32-bit quantity in a0.
+ * Generic 64bit-to-32bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
*
- * For: long-to-float, double-to-int, double-to-float
- * If hard floating point support is available, use fa0 as the parameter,
- * except for long-to-float opcode.
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for OP_MOVE.)
+ * For: double-to-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
EAS2(a3, rFP, a3) # a3 <- &fp[B]
LOAD64_F(fa0, fa0f, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
cvt.s.d fv0, fa0
-
-.Lop_double_to_float_set_vreg_f:
- SET_VREG_F(fv0, rOBJ) # vA <- result0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
/* ------------------------------ */
@@ -3983,22 +4206,21 @@
/* File: mips/unop.S */
/*
* Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * specifies an instruction that performs "result0 = op a0".
* This could be a MIPS instruction or a function call.
*
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
GET_OPA4(t0) # t0 <- A+
GET_VREG(a0, a3) # a0 <- vB
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- sll a0, a0, 24 # optional op
- sra a0, a0, 24 # a0 <- op, a0-a3 changed
+ # optional op
+ SEB(a0, a0) # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
- /* 9-10 instructions */
+ SET_VREG_GOTO(a0, t0, t1) # vA <- result0
/* ------------------------------ */
@@ -4008,11 +4230,11 @@
/* File: mips/unop.S */
/*
* Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * specifies an instruction that performs "result0 = op a0".
* This could be a MIPS instruction or a function call.
*
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
@@ -4022,8 +4244,7 @@
# optional op
and a0, 0xffff # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
- /* 9-10 instructions */
+ SET_VREG_GOTO(a0, t0, t1) # vA <- result0
/* ------------------------------ */
@@ -4033,22 +4254,21 @@
/* File: mips/unop.S */
/*
* Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op a0".
+ * specifies an instruction that performs "result0 = op a0".
* This could be a MIPS instruction or a function call.
*
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
*/
/* unop vA, vB */
GET_OPB(a3) # a3 <- B
GET_OPA4(t0) # t0 <- A+
GET_VREG(a0, a3) # a0 <- vB
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- sll a0, 16 # optional op
- sra a0, 16 # a0 <- op, a0-a3 changed
+ # optional op
+ SEH(a0, a0) # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
- /* 9-10 instructions */
+ SET_VREG_GOTO(a0, t0, t1) # vA <- result0
/* ------------------------------ */
@@ -4087,7 +4307,6 @@
addu a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4126,7 +4345,6 @@
subu a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4165,7 +4383,6 @@
mul a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4205,7 +4422,6 @@
div a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
#else
/* File: mips/binop.S */
@@ -4240,7 +4456,6 @@
mflo a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
#endif
@@ -4281,7 +4496,6 @@
mod a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
#else
/* File: mips/binop.S */
@@ -4316,7 +4530,6 @@
mfhi a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
#endif
@@ -4356,7 +4569,6 @@
and a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4395,7 +4607,6 @@
or a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4434,7 +4645,6 @@
xor a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4473,7 +4683,6 @@
sll a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4512,7 +4721,6 @@
sra a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4551,7 +4759,6 @@
srl a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 11-14 instructions */
/* ------------------------------ */
@@ -4571,10 +4778,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -4600,7 +4807,6 @@
addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
- /* 14-17 instructions */
/* ------------------------------ */
@@ -4619,10 +4825,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -4648,7 +4854,6 @@
subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
- /* 14-17 instructions */
/* ------------------------------ */
@@ -4702,10 +4907,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -4731,7 +4936,6 @@
JAL(__divdi3) # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
- /* 14-17 instructions */
/* ------------------------------ */
@@ -4743,10 +4947,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -4772,7 +4976,6 @@
JAL(__moddi3) # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
- /* 14-17 instructions */
/* ------------------------------ */
@@ -4784,10 +4987,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -4813,7 +5016,6 @@
and a1, a1, a3 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
- /* 14-17 instructions */
/* ------------------------------ */
@@ -4825,10 +5027,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -4854,7 +5056,6 @@
or a1, a1, a3 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
- /* 14-17 instructions */
/* ------------------------------ */
@@ -4866,10 +5067,10 @@
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vCC (a2-a3). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long
@@ -4895,7 +5096,6 @@
xor a1, a1, a3 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
- /* 14-17 instructions */
/* ------------------------------ */
@@ -4928,7 +5128,7 @@
srl a0, v1 # alo<- alo >> (32-(shift&31))
sll v1, a1, a2 # rhi<- ahi << (shift&31)
or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
/* ------------------------------ */
.balign 128
@@ -4959,7 +5159,7 @@
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/VAA+1 <- v0/v0
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/VAA+1 <- v0/v1
/* ------------------------------ */
.balign 128
@@ -5006,7 +5206,7 @@
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
srl a3, a0, 8 # a3 <- CC
and a2, a0, 255 # a2 <- BB
GET_VREG_F(fa1, a3) # a1 <- vCC
@@ -5014,9 +5214,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
add.s fv0, fa0, fa1 # f0 = result
- SET_VREG_F(fv0, rOBJ) # vAA <- fv0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
/* ------------------------------ */
@@ -5032,7 +5231,7 @@
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
srl a3, a0, 8 # a3 <- CC
and a2, a0, 255 # a2 <- BB
GET_VREG_F(fa1, a3) # a1 <- vCC
@@ -5040,9 +5239,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
sub.s fv0, fa0, fa1 # f0 = result
- SET_VREG_F(fv0, rOBJ) # vAA <- fv0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
/* ------------------------------ */
@@ -5058,7 +5256,7 @@
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
srl a3, a0, 8 # a3 <- CC
and a2, a0, 255 # a2 <- BB
GET_VREG_F(fa1, a3) # a1 <- vCC
@@ -5066,9 +5264,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
mul.s fv0, fa0, fa1 # f0 = result
- SET_VREG_F(fv0, rOBJ) # vAA <- fv0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
/* ------------------------------ */
@@ -5084,7 +5281,7 @@
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
srl a3, a0, 8 # a3 <- CC
and a2, a0, 255 # a2 <- BB
GET_VREG_F(fa1, a3) # a1 <- vCC
@@ -5092,9 +5289,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
div.s fv0, fa0, fa1 # f0 = result
- SET_VREG_F(fv0, rOBJ) # vAA <- fv0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
/* ------------------------------ */
@@ -5110,7 +5306,7 @@
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
srl a3, a0, 8 # a3 <- CC
and a2, a0, 255 # a2 <- BB
GET_VREG_F(fa1, a3) # a1 <- vCC
@@ -5118,9 +5314,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
JAL(fmodf) # f0 = result
- SET_VREG_F(fv0, rOBJ) # vAA <- fv0
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
/* ------------------------------ */
@@ -5129,8 +5324,8 @@
/* File: mips/op_add_double.S */
/* File: mips/fbinopWide.S */
/*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point binary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* for: add-double, sub-double, mul-double, div-double,
@@ -5139,7 +5334,7 @@
*/
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
@@ -5149,8 +5344,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
add.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
- b .Lop_add_double_finish
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
/* ------------------------------ */
@@ -5159,8 +5354,8 @@
/* File: mips/op_sub_double.S */
/* File: mips/fbinopWide.S */
/*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point binary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* for: add-double, sub-double, mul-double, div-double,
@@ -5169,7 +5364,7 @@
*/
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
@@ -5179,8 +5374,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
sub.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
- b .Lop_sub_double_finish
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
/* ------------------------------ */
@@ -5189,8 +5384,8 @@
/* File: mips/op_mul_double.S */
/* File: mips/fbinopWide.S */
/*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point binary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* for: add-double, sub-double, mul-double, div-double,
@@ -5199,7 +5394,7 @@
*/
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
@@ -5209,8 +5404,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
mul.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
- b .Lop_mul_double_finish
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
/* ------------------------------ */
@@ -5219,8 +5414,8 @@
/* File: mips/op_div_double.S */
/* File: mips/fbinopWide.S */
/*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point binary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* for: add-double, sub-double, mul-double, div-double,
@@ -5229,7 +5424,7 @@
*/
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
@@ -5239,8 +5434,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
div.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
- b .Lop_div_double_finish
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
/* ------------------------------ */
@@ -5249,8 +5444,8 @@
/* File: mips/op_rem_double.S */
/* File: mips/fbinopWide.S */
/*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point binary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* for: add-double, sub-double, mul-double, div-double,
@@ -5259,7 +5454,7 @@
*/
/* binop vAA, vBB, vCC */
FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # s5 <- AA
+ GET_OPA(rOBJ) # rOBJ <- AA
and a2, a0, 255 # a2 <- BB
srl a3, a0, 8 # a3 <- CC
EAS2(a2, rFP, a2) # a2 <- &fp[BB]
@@ -5269,8 +5464,8 @@
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
JAL(fmod)
- SET_VREG64_F(fv0, fv0f, rOBJ)
- b .Lop_rem_double_finish
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
/* ------------------------------ */
@@ -5304,8 +5499,7 @@
# optional op
addu a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5339,8 +5533,7 @@
# optional op
subu a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5374,8 +5567,7 @@
# optional op
mul a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5410,8 +5602,7 @@
# optional op
div a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#else
/* File: mips/binop2addr.S */
@@ -5441,8 +5632,7 @@
div zero, a0, a1 # optional op
mflo a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#endif
@@ -5478,8 +5668,7 @@
# optional op
mod a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#else
/* File: mips/binop2addr.S */
@@ -5509,8 +5698,7 @@
div zero, a0, a1 # optional op
mfhi a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#endif
@@ -5545,8 +5733,7 @@
# optional op
and a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5580,8 +5767,7 @@
# optional op
or a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5615,8 +5801,7 @@
# optional op
xor a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5650,8 +5835,7 @@
# optional op
sll a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5685,8 +5869,7 @@
# optional op
sra a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5720,8 +5903,7 @@
# optional op
srl a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -5736,22 +5918,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
.if 0
or t0, a2, a3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -5761,9 +5942,7 @@
addu v0, a2, a0 # optional op
addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
/* ------------------------------ */
@@ -5778,22 +5957,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
.if 0
or t0, a2, a3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -5803,9 +5981,7 @@
subu v0, a0, a2 # optional op
subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
/* ------------------------------ */
@@ -5840,9 +6016,7 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t1) # extract opcode from rINST
- # vAA <- v0 (low)
- SET_VREG64(v0, v1, rOBJ) # vAA+1 <- v1 (high)
- GOTO_OPCODE(t1) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, rOBJ, t1) # vA/vA+1 <- v0(low)/v1(high)
/* ------------------------------ */
.balign 128
@@ -5853,22 +6027,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
.if 1
or t0, a2, a3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -5878,9 +6051,7 @@
# optional op
JAL(__divdi3) # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
/* ------------------------------ */
@@ -5892,22 +6063,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
.if 1
or t0, a2, a3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -5917,9 +6087,7 @@
# optional op
JAL(__moddi3) # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
/* ------------------------------ */
@@ -5931,22 +6099,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
.if 0
or t0, a2, a3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -5956,9 +6123,7 @@
and a0, a0, a2 # optional op
and a1, a1, a3 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
/* ------------------------------ */
@@ -5970,22 +6135,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
.if 0
or t0, a2, a3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -5995,9 +6159,7 @@
or a0, a0, a2 # optional op
or a1, a1, a3 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
/* ------------------------------ */
@@ -6009,22 +6171,21 @@
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = a0-a1 op a2-a3".
* This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
+ * comes back in a register pair other than a0-a1, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
+ * vB (a2-a3). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr
- * rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a1) # a1 <- B
EAS2(a1, rFP, a1) # a1 <- &fp[B]
EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
.if 0
or t0, a2, a3 # second arg (a2-a3) is zero?
beqz t0, common_errDivideByZero
@@ -6034,9 +6195,7 @@
xor a0, a0, a2 # optional op
xor a1, a1, a3 # result <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
- /* 12-15 instructions */
+ SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
/* ------------------------------ */
@@ -6052,7 +6211,7 @@
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
- LOAD64(a0, a1, t2) # a0/a1 <- vAA/vAA+1
+ LOAD64(a0, a1, t2) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -6065,7 +6224,7 @@
srl a0, v1 # alo<- alo >> (32-(shift&31))
sll v1, a1, a2 # rhi<- ahi << (shift&31)
or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
/* ------------------------------ */
.balign 128
@@ -6080,7 +6239,7 @@
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t0, rFP, t2) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -6092,7 +6251,7 @@
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
/* ------------------------------ */
.balign 128
@@ -6107,7 +6266,7 @@
GET_OPB(a3) # a3 <- B
GET_VREG(a2, a3) # a2 <- vB
EAS2(t0, rFP, t3) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vAA/vAA+1
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
@@ -6120,7 +6279,7 @@
sll a1, 1
sll a1, a0 # ahi<- ahi << (32-(shift&31))
or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/vAA+1 <- a0/a1
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
/* ------------------------------ */
.balign 128
@@ -6129,23 +6288,22 @@
/* File: mips/fbinop2addr.S */
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "result = a0 op a1".
+ * that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
+ * div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG_F(fa0, rOBJ)
GET_VREG_F(fa1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
add.s fv0, fa0, fa1
- SET_VREG_F(fv0, rOBJ) # vAA <- result
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
/* ------------------------------ */
@@ -6155,23 +6313,22 @@
/* File: mips/fbinop2addr.S */
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "result = a0 op a1".
+ * that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
+ * div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG_F(fa0, rOBJ)
GET_VREG_F(fa1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
sub.s fv0, fa0, fa1
- SET_VREG_F(fv0, rOBJ) # vAA <- result
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
/* ------------------------------ */
@@ -6181,23 +6338,22 @@
/* File: mips/fbinop2addr.S */
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "result = a0 op a1".
+ * that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
+ * div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG_F(fa0, rOBJ)
GET_VREG_F(fa1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
mul.s fv0, fa0, fa1
- SET_VREG_F(fv0, rOBJ) # vAA <- result
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
/* ------------------------------ */
@@ -6207,23 +6363,22 @@
/* File: mips/fbinop2addr.S */
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "result = a0 op a1".
+ * that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
+ * div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG_F(fa0, rOBJ)
GET_VREG_F(fa1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
div.s fv0, fa0, fa1
- SET_VREG_F(fv0, rOBJ) # vAA <- result
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
/* ------------------------------ */
@@ -6233,23 +6388,22 @@
/* File: mips/fbinop2addr.S */
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "result = a0 op a1".
+ * that specifies an instruction that performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
+ * div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_OPB(a3) # a3 <- B
GET_VREG_F(fa0, rOBJ)
GET_VREG_F(fa1, a3)
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
JAL(fmodf)
- SET_VREG_F(fv0, rOBJ) # vAA <- result
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
/* ------------------------------ */
@@ -6258,12 +6412,13 @@
/* File: mips/op_add_double_2addr.S */
/* File: mips/fbinopWide2addr.S */
/*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point "/2addr" binary operation.
+ * Provide an "instr" line that specifies an instruction that
+ * performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
+ * div-double/2addr, rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -6275,9 +6430,8 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
add.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
/* ------------------------------ */
@@ -6286,12 +6440,13 @@
/* File: mips/op_sub_double_2addr.S */
/* File: mips/fbinopWide2addr.S */
/*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point "/2addr" binary operation.
+ * Provide an "instr" line that specifies an instruction that
+ * performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
+ * div-double/2addr, rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -6303,9 +6458,8 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
sub.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
/* ------------------------------ */
@@ -6314,12 +6468,13 @@
/* File: mips/op_mul_double_2addr.S */
/* File: mips/fbinopWide2addr.S */
/*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point "/2addr" binary operation.
+ * Provide an "instr" line that specifies an instruction that
+ * performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
+ * div-double/2addr, rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -6331,9 +6486,8 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
mul.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
/* ------------------------------ */
@@ -6342,12 +6496,13 @@
/* File: mips/op_div_double_2addr.S */
/* File: mips/fbinopWide2addr.S */
/*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point "/2addr" binary operation.
+ * Provide an "instr" line that specifies an instruction that
+ * performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
+ * div-double/2addr, rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -6359,9 +6514,8 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
div.d fv0, fa0, fa1
- SET_VREG64_F(fv0, fv0f, rOBJ)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
/* ------------------------------ */
@@ -6370,12 +6524,13 @@
/* File: mips/op_rem_double_2addr.S */
/* File: mips/fbinopWide2addr.S */
/*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * Generic 64-bit floating-point "/2addr" binary operation.
+ * Provide an "instr" line that specifies an instruction that
+ * performs "fv0 = fa0 op fa1".
* This could be an MIPS instruction or a function call.
*
* For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
+ * div-double/2addr, rem-double/2addr
*/
/* binop/2addr vA, vB */
GET_OPA4(rOBJ) # rOBJ <- A+
@@ -6387,9 +6542,8 @@
FETCH_ADVANCE_INST(1) # advance rPC, load rINST
JAL(fmod)
- SET_VREG64_F(fv0, fv0f, rOBJ)
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
/* ------------------------------ */
@@ -6409,12 +6563,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 0
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6424,8 +6577,7 @@
# optional op
addu a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -6446,12 +6598,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 0
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6461,8 +6612,7 @@
# optional op
subu a0, a1, a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -6482,12 +6632,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 0
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6497,8 +6646,7 @@
# optional op
mul a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -6519,12 +6667,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 1
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6534,8 +6681,7 @@
# optional op
div a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#else
/* File: mips/binopLit16.S */
@@ -6551,12 +6697,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 1
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6566,8 +6711,7 @@
div zero, a0, a1 # optional op
mflo a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#endif
@@ -6589,12 +6733,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 1
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6604,8 +6747,7 @@
# optional op
mod a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#else
/* File: mips/binopLit16.S */
@@ -6621,12 +6763,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 1
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6636,8 +6777,7 @@
div zero, a0, a1 # optional op
mfhi a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
#endif
@@ -6658,12 +6798,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 0
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6673,8 +6812,7 @@
# optional op
and a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -6694,12 +6832,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 0
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6709,8 +6846,7 @@
# optional op
or a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -6730,12 +6866,11 @@
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
- # binop/lit16 vA, vB, /* +CCCC */
+ /* binop/lit16 vA, vB, +CCCC */
FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
GET_OPB(a2) # a2 <- B
- GET_OPA(rOBJ) # rOBJ <- A+
+ GET_OPA4(rOBJ) # rOBJ <- A+
GET_VREG(a0, a2) # a0 <- vB
- and rOBJ, rOBJ, 15
.if 0
# cmp a1, 0; is second operand zero?
beqz a1, common_errDivideByZero
@@ -6745,8 +6880,7 @@
# optional op
xor a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-13 instructions */
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
/* ------------------------------ */
@@ -6767,7 +6901,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -6783,7 +6917,6 @@
addu a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -6804,7 +6937,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -6820,7 +6953,6 @@
subu a0, a1, a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -6841,7 +6973,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -6857,7 +6989,6 @@
mul a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -6879,7 +7010,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -6895,7 +7026,6 @@
div a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
#else
/* File: mips/binopLit8.S */
@@ -6912,7 +7042,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -6928,7 +7058,6 @@
mflo a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
#endif
@@ -6951,7 +7080,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -6967,7 +7096,6 @@
mod a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
#else
/* File: mips/binopLit8.S */
@@ -6984,7 +7112,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -7000,7 +7128,6 @@
mfhi a0 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
#endif
@@ -7022,7 +7149,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -7038,7 +7165,6 @@
and a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -7059,7 +7185,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -7075,7 +7201,6 @@
or a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -7096,7 +7221,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -7112,7 +7237,6 @@
xor a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -7133,7 +7257,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -7149,7 +7273,6 @@
sll a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -7170,7 +7293,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -7186,7 +7309,6 @@
sra a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -7207,7 +7329,7 @@
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
- # binop/lit8 vAA, vBB, /* +CC */
+ /* binop/lit8 vAA, vBB, +CC */
FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
GET_OPA(rOBJ) # rOBJ <- AA
and a2, a3, 255 # a2 <- BB
@@ -7223,7 +7345,6 @@
srl a0, a0, a1 # a0 <- op, a0-a3 changed
GET_INST_OPCODE(t0) # extract opcode from rINST
SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
- /* 10-12 instructions */
/* ------------------------------ */
@@ -7231,7 +7352,7 @@
.L_op_iget_quick: /* 0xe3 */
/* File: mips/op_iget_quick.S */
/* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
@@ -7248,7 +7369,7 @@
.balign 128
.L_op_iget_wide_quick: /* 0xe4 */
/* File: mips/op_iget_wide_quick.S */
- # iget-wide-quick vA, vB, offset /* CCCC */
+ /* iget-wide-quick vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
@@ -7259,8 +7380,7 @@
LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(a0, a1, a2) # fp[A] <- a0/a1
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
/* ------------------------------ */
.balign 128
@@ -7277,17 +7397,16 @@
GET_OPA4(a2) # a2<- A+
PREFETCH_INST(2) # load rINST
bnez a3, MterpPossibleException # bail out
- SET_VREG_OBJECT(v0, a2) # fp[A] <- v0
ADVANCE(2) # advance rPC
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
/* ------------------------------ */
.balign 128
.L_op_iput_quick: /* 0xe6 */
/* File: mips/op_iput_quick.S */
/* For: iput-quick, iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
FETCH(a1, 1) # a1 <- field byte offset
@@ -7296,15 +7415,16 @@
GET_VREG(a0, a2) # a0 <- fp[A]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu t0, a3, a1
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GET_OPCODE_TARGET(t1)
sw a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t1) # jump to next instruction
/* ------------------------------ */
.balign 128
.L_op_iput_wide_quick: /* 0xe7 */
/* File: mips/op_iput_wide_quick.S */
- # iput-wide-quick vA, vB, offset /* CCCC */
+ /* iput-wide-quick vA, vB, offset@CCCC */
GET_OPA4(a0) # a0 <- A(+)
GET_OPB(a1) # a1 <- B
GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
@@ -7315,16 +7435,17 @@
FETCH(a3, 1) # a3 <- field byte offset
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu a2, a2, a3 # obj.field (64 bits, aligned) <- a0/a1
- STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ GET_OPCODE_TARGET(t0)
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
+ JR(t0) # jump to next instruction
/* ------------------------------ */
.balign 128
.L_op_iput_object_quick: /* 0xe8 */
/* File: mips/op_iput_object_quick.S */
/* For: iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
EXPORT_PC()
addu a0, rFP, OFF_FP_SHADOWFRAME
move a1, rPC
@@ -7343,8 +7464,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeVirtualQuick
EXPORT_PC()
move a0, rSELF
@@ -7368,8 +7489,8 @@
/*
* Generic invoke handler wrapper.
*/
- # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
- # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern MterpInvokeVirtualQuickRange
EXPORT_PC()
move a0, rSELF
@@ -7391,7 +7512,7 @@
/* File: mips/op_iput_boolean_quick.S */
/* File: mips/op_iput_quick.S */
/* For: iput-quick, iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
FETCH(a1, 1) # a1 <- field byte offset
@@ -7400,9 +7521,10 @@
GET_VREG(a0, a2) # a0 <- fp[A]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu t0, a3, a1
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GET_OPCODE_TARGET(t1)
sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t1) # jump to next instruction
/* ------------------------------ */
@@ -7411,7 +7533,7 @@
/* File: mips/op_iput_byte_quick.S */
/* File: mips/op_iput_quick.S */
/* For: iput-quick, iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
FETCH(a1, 1) # a1 <- field byte offset
@@ -7420,9 +7542,10 @@
GET_VREG(a0, a2) # a0 <- fp[A]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu t0, a3, a1
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GET_OPCODE_TARGET(t1)
sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t1) # jump to next instruction
/* ------------------------------ */
@@ -7431,7 +7554,7 @@
/* File: mips/op_iput_char_quick.S */
/* File: mips/op_iput_quick.S */
/* For: iput-quick, iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
FETCH(a1, 1) # a1 <- field byte offset
@@ -7440,9 +7563,10 @@
GET_VREG(a0, a2) # a0 <- fp[A]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu t0, a3, a1
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GET_OPCODE_TARGET(t1)
sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t1) # jump to next instruction
/* ------------------------------ */
@@ -7451,7 +7575,7 @@
/* File: mips/op_iput_short_quick.S */
/* File: mips/op_iput_quick.S */
/* For: iput-quick, iput-object-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
FETCH(a1, 1) # a1 <- field byte offset
@@ -7460,9 +7584,10 @@
GET_VREG(a0, a2) # a0 <- fp[A]
FETCH_ADVANCE_INST(2) # advance rPC, load rINST
addu t0, a3, a1
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GET_OPCODE_TARGET(t1)
sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
+ JR(t1) # jump to next instruction
/* ------------------------------ */
@@ -7471,7 +7596,7 @@
/* File: mips/op_iget_boolean_quick.S */
/* File: mips/op_iget_quick.S */
/* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
@@ -7491,7 +7616,7 @@
/* File: mips/op_iget_byte_quick.S */
/* File: mips/op_iget_quick.S */
/* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
@@ -7511,7 +7636,7 @@
/* File: mips/op_iget_char_quick.S */
/* File: mips/op_iget_quick.S */
/* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
@@ -7531,7 +7656,7 @@
/* File: mips/op_iget_short_quick.S */
/* File: mips/op_iget_quick.S */
/* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- # op vA, vB, offset /* CCCC */
+ /* op vA, vB, offset@CCCC */
GET_OPB(a2) # a2 <- B
GET_VREG(a3, a2) # a3 <- object we're operating on
FETCH(a1, 1) # a1 <- field byte offset
@@ -7694,264 +7819,29 @@
.balign 4
artMterpAsmSisterStart:
-/* continuation for op_cmpl_float */
-
-.Lop_cmpl_float_nan:
- li rTEMP, -1
-
-.Lop_cmpl_float_finish:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* continuation for op_cmpg_float */
-
-.Lop_cmpg_float_nan:
- li rTEMP, 1
-
-.Lop_cmpg_float_finish:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* continuation for op_cmpl_double */
-
-.Lop_cmpl_double_nan:
- li rTEMP, -1
-
-.Lop_cmpl_double_finish:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* continuation for op_cmpg_double */
-
-.Lop_cmpg_double_nan:
- li rTEMP, 1
-
-.Lop_cmpg_double_finish:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* continuation for op_float_to_int */
-
-/*
- * Not an entry point as it is used only once !!
- */
-f2i_doconv:
-#ifdef MIPS32REVGE6
- l.s fa1, .LFLOAT_TO_INT_max
- cmp.le.s ft2, fa1, fa0
- l.s fv0, .LFLOAT_TO_INT_ret_max
- bc1nez ft2, .Lop_float_to_int_set_vreg_f
-
- l.s fa1, .LFLOAT_TO_INT_min
- cmp.le.s ft2, fa0, fa1
- l.s fv0, .LFLOAT_TO_INT_ret_min
- bc1nez ft2, .Lop_float_to_int_set_vreg_f
-
- mov.s fa1, fa0
- cmp.un.s ft2, fa0, fa1
- li.s fv0, 0
- bc1nez ft2, .Lop_float_to_int_set_vreg_f
-#else
- l.s fa1, .LFLOAT_TO_INT_max
- c.ole.s fcc0, fa1, fa0
- l.s fv0, .LFLOAT_TO_INT_ret_max
- bc1t .Lop_float_to_int_set_vreg_f
-
- l.s fa1, .LFLOAT_TO_INT_min
- c.ole.s fcc0, fa0, fa1
- l.s fv0, .LFLOAT_TO_INT_ret_min
- bc1t .Lop_float_to_int_set_vreg_f
-
- mov.s fa1, fa0
- c.un.s fcc0, fa0, fa1
- li.s fv0, 0
- bc1t .Lop_float_to_int_set_vreg_f
-#endif
-
- trunc.w.s fv0, fa0
- b .Lop_float_to_int_set_vreg_f
-
-.LFLOAT_TO_INT_max:
- .word 0x4f000000
-.LFLOAT_TO_INT_min:
- .word 0xcf000000
-.LFLOAT_TO_INT_ret_max:
- .word 0x7fffffff
-.LFLOAT_TO_INT_ret_min:
- .word 0x80000000
-
/* continuation for op_float_to_long */
-f2l_doconv:
-#ifdef MIPS32REVGE6
- l.s fa1, .LLONG_TO_max
- cmp.le.s ft2, fa1, fa0
- li rRESULT0, ~0
- li rRESULT1, ~0x80000000
- bc1nez ft2, .Lop_float_to_long_set_vreg
-
- l.s fa1, .LLONG_TO_min
- cmp.le.s ft2, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0x80000000
- bc1nez ft2, .Lop_float_to_long_set_vreg
-
- mov.s fa1, fa0
- cmp.un.s ft2, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0
- bc1nez ft2, .Lop_float_to_long_set_vreg
-#else
- l.s fa1, .LLONG_TO_max
- c.ole.s fcc0, fa1, fa0
- li rRESULT0, ~0
- li rRESULT1, ~0x80000000
- bc1t .Lop_float_to_long_set_vreg
-
- l.s fa1, .LLONG_TO_min
- c.ole.s fcc0, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0x80000000
- bc1t .Lop_float_to_long_set_vreg
-
- mov.s fa1, fa0
- c.un.s fcc0, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0
- bc1t .Lop_float_to_long_set_vreg
+#ifndef MIPS32REVGE6
+.Lop_float_to_long_get_opcode:
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+.Lop_float_to_long_set_vreg:
+ SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
#endif
- JAL(__fixsfdi)
-
- b .Lop_float_to_long_set_vreg
-
-.LLONG_TO_max:
- .word 0x5f000000
-
-.LLONG_TO_min:
- .word 0xdf000000
-
-/* continuation for op_double_to_int */
-
-d2i_doconv:
-#ifdef MIPS32REVGE6
- la t0, .LDOUBLE_TO_INT_max
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa1, fa0
- l.s fv0, .LDOUBLE_TO_INT_maxret
- bc1nez ft2, .Lop_double_to_int_set_vreg_f
-
- la t0, .LDOUBLE_TO_INT_min
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa0, fa1
- l.s fv0, .LDOUBLE_TO_INT_minret
- bc1nez ft2, .Lop_double_to_int_set_vreg_f
-
- mov.d fa1, fa0
- cmp.un.d ft2, fa0, fa1
- li.s fv0, 0
- bc1nez ft2, .Lop_double_to_int_set_vreg_f
-#else
- la t0, .LDOUBLE_TO_INT_max
- LOAD64_F(fa1, fa1f, t0)
- c.ole.d fcc0, fa1, fa0
- l.s fv0, .LDOUBLE_TO_INT_maxret
- bc1t .Lop_double_to_int_set_vreg_f
-
- la t0, .LDOUBLE_TO_INT_min
- LOAD64_F(fa1, fa1f, t0)
- c.ole.d fcc0, fa0, fa1
- l.s fv0, .LDOUBLE_TO_INT_minret
- bc1t .Lop_double_to_int_set_vreg_f
-
- mov.d fa1, fa0
- c.un.d fcc0, fa0, fa1
- li.s fv0, 0
- bc1t .Lop_double_to_int_set_vreg_f
-#endif
-
- trunc.w.d fv0, fa0
- b .Lop_double_to_int_set_vreg_f
-
-.LDOUBLE_TO_INT_max:
- .dword 0x41dfffffffc00000
-.LDOUBLE_TO_INT_min:
- .dword 0xc1e0000000000000 # minint, as a double (high word)
-.LDOUBLE_TO_INT_maxret:
- .word 0x7fffffff
-.LDOUBLE_TO_INT_minret:
- .word 0x80000000
-
/* continuation for op_double_to_long */
-d2l_doconv:
-#ifdef MIPS32REVGE6
- la t0, .LDOUBLE_TO_LONG_max
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa1, fa0
- la t0, .LDOUBLE_TO_LONG_ret_max
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1nez ft2, .Lop_double_to_long_set_vreg
-
- la t0, .LDOUBLE_TO_LONG_min
- LOAD64_F(fa1, fa1f, t0)
- cmp.le.d ft2, fa0, fa1
- la t0, .LDOUBLE_TO_LONG_ret_min
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1nez ft2, .Lop_double_to_long_set_vreg
-
- mov.d fa1, fa0
- cmp.un.d ft2, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0
- bc1nez ft2, .Lop_double_to_long_set_vreg
-#else
- la t0, .LDOUBLE_TO_LONG_max
- LOAD64_F(fa1, fa1f, t0)
- c.ole.d fcc0, fa1, fa0
- la t0, .LDOUBLE_TO_LONG_ret_max
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1t .Lop_double_to_long_set_vreg
-
- la t0, .LDOUBLE_TO_LONG_min
- LOAD64_F(fa1, fa1f, t0)
- c.ole.d fcc0, fa0, fa1
- la t0, .LDOUBLE_TO_LONG_ret_min
- LOAD64(rRESULT0, rRESULT1, t0)
- bc1t .Lop_double_to_long_set_vreg
-
- mov.d fa1, fa0
- c.un.d fcc0, fa0, fa1
- li rRESULT0, 0
- li rRESULT1, 0
- bc1t .Lop_double_to_long_set_vreg
+#ifndef MIPS32REVGE6
+.Lop_double_to_long_get_opcode:
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+.Lop_double_to_long_set_vreg:
+ SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
#endif
- JAL(__fixdfdi)
- b .Lop_double_to_long_set_vreg
-
-.LDOUBLE_TO_LONG_max:
- .dword 0x43e0000000000000 # maxlong, as a double (high word)
-.LDOUBLE_TO_LONG_min:
- .dword 0xc3e0000000000000 # minlong, as a double (high word)
-.LDOUBLE_TO_LONG_ret_max:
- .dword 0x7fffffffffffffff
-.LDOUBLE_TO_LONG_ret_min:
- .dword 0x8000000000000000
/* continuation for op_mul_long */
.Lop_mul_long_finish:
GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64(v0, v1, a0) # vAA::vAA+1 <- v0(low) :: v1(high)
- GOTO_OPCODE(t0) # jump to next instruction
+ SET_VREG64_GOTO(v0, v1, a0, t0) # vAA/vAA+1 <- v0(low)/v1(high)
/* continuation for op_shl_long */
@@ -7969,51 +7859,21 @@
.Lop_ushr_long_finish:
SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
-/* continuation for op_add_double */
-
-.Lop_add_double_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* continuation for op_sub_double */
-
-.Lop_sub_double_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* continuation for op_mul_double */
-
-.Lop_mul_double_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* continuation for op_div_double */
-
-.Lop_div_double_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* continuation for op_rem_double */
-
-.Lop_rem_double_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
/* continuation for op_shl_long_2addr */
.Lop_shl_long_2addr_finish:
- SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
+ SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vA/vA+1 <- rlo/rhi
/* continuation for op_shr_long_2addr */
.Lop_shr_long_2addr_finish:
sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t2, t0) # vAA/vAA+1 <- rlo/rhi
+ SET_VREG64_GOTO(v1, a3, t2, t0) # vA/vA+1 <- rlo/rhi
/* continuation for op_ushr_long_2addr */
.Lop_ushr_long_2addr_finish:
- SET_VREG64_GOTO(v1, zero, t3, t0) # vAA/vAA+1 <- rlo/rhi
+ SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
.size artMterpAsmSisterStart, .-artMterpAsmSisterStart
.global artMterpAsmSisterEnd
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 6a357b3..03d6487 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "class_ext.h"
#include "class_linker-inl.h"
#include "class_loader.h"
#include "class-inl.h"
@@ -29,6 +30,7 @@
#include "method.h"
#include "object_array-inl.h"
#include "object-inl.h"
+#include "object_lock.h"
#include "runtime.h"
#include "thread.h"
#include "throwable.h"
@@ -58,12 +60,23 @@
java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-inline void Class::SetVerifyError(ObjPtr<Object> error) {
- CHECK(error != nullptr) << PrettyClass();
+ClassExt* Class::GetExtData() {
+ return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_));
+}
+
+void Class::SetExtData(ObjPtr<ClassExt> ext) {
+ CHECK(ext != nullptr) << PrettyClass();
+ // TODO It might be wise to just create an internal (global?) mutex that we synchronize on instead
+ // to prevent any possibility of deadlocks with java code. Alternatively we might want to come up
+ // with some other abstraction.
+ DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
+ << "The " << PrettyClass() << " object should be locked when writing to the extData field.";
+ DCHECK(GetExtData() == nullptr)
+ << "The extData for " << PrettyClass() << " has already been set!";
if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
+ SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_), ext);
} else {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_), ext);
}
}
@@ -95,9 +108,33 @@
}
}
- // Remember the current exception.
- CHECK(self->GetException() != nullptr);
- h_this->SetVerifyError(self->GetException());
+ {
+ // Ensure we lock around 'this' when we set the ClassExt.
+ ObjectLock<mirror::Class> lock(self, h_this);
+ StackHandleScope<2> hs(self);
+ // Remember the current exception.
+ Handle<Throwable> exception(hs.NewHandle(self->GetException()));
+ CHECK(exception.Get() != nullptr);
+ MutableHandle<ClassExt> ext(hs.NewHandle(h_this->GetExtData()));
+ if (ext.Get() == nullptr) {
+ // Cannot have exception while allocating.
+ self->ClearException();
+ ext.Assign(ClassExt::Alloc(self));
+ DCHECK(ext.Get() == nullptr || ext->GetVerifyError() == nullptr);
+ if (ext.Get() != nullptr) {
+ self->AssertNoPendingException();
+ h_this->SetExtData(ext.Get());
+ self->SetException(exception.Get());
+ } else {
+ // TODO Should we restore the old exception anyway?
+ self->AssertPendingOOMException();
+ }
+ }
+ if (ext.Get() != nullptr) {
+ ext->SetVerifyError(self->GetException());
+ }
+ }
+ self->AssertPendingException();
}
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 57bb2ed..23c70ff 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -49,6 +49,7 @@
namespace mirror {
+class ClassExt;
class ClassLoader;
class Constructor;
class DexCache;
@@ -1130,10 +1131,7 @@
void SetClinitThreadId(pid_t new_clinit_thread_id) REQUIRES_SHARED(Locks::mutator_lock_);
- Object* GetVerifyError() REQUIRES_SHARED(Locks::mutator_lock_) {
- // DCHECK(IsErroneous());
- return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_));
- }
+ ClassExt* GetExtData() REQUIRES_SHARED(Locks::mutator_lock_);
uint16_t GetDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
@@ -1322,7 +1320,8 @@
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
REQUIRES_SHARED(Locks::mutator_lock_);
- void SetVerifyError(ObjPtr<Object> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Set the extData field. This should be done while the 'this' is locked to prevent races.
+ void SetExtData(ObjPtr<ClassExt> ext) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
@@ -1388,6 +1387,12 @@
// runtime such as arrays and primitive classes).
HeapReference<DexCache> dex_cache_;
+ // Extraneous class data that is not always needed. This field is allocated lazily and may
+ // only be set with 'this' locked. This is synchronized on 'this'.
+ // TODO(allight) We should probably synchronize it on something external or handle allocation in
+ // some other (safe) way to prevent possible deadlocks.
+ HeapReference<ClassExt> ext_data_;
+
// The interface table (iftable_) contains pairs of a interface class and an array of the
// interface methods. There is one pair per interface supported by this class. That means one
// pair for each interface we support directly, indirectly via superclass, or indirectly via a
@@ -1412,10 +1417,6 @@
// check for interfaces and return null.
HeapReference<Class> super_class_;
- // If class verify fails, we must return same error on subsequent tries. We may store either
- // the class of the error, or an actual instance of Throwable here.
- HeapReference<Object> verify_error_;
-
// Virtual method table (vtable), for use by "invoke-virtual". The vtable from the superclass is
// copied in, and virtual methods from our class either replace those from the super or are
// appended. For abstract classes, methods may be created in the vtable that aren't in
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
new file mode 100644
index 0000000..cc208e4
--- /dev/null
+++ b/runtime/mirror/class_ext.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_ext.h"
+
+#include "art_method-inl.h"
+#include "base/casts.h"
+#include "base/enums.h"
+#include "class-inl.h"
+#include "dex_file-inl.h"
+#include "gc/accounting/card_table-inl.h"
+#include "object-inl.h"
+#include "object_array.h"
+#include "object_array-inl.h"
+#include "stack_trace_element.h"
+#include "utils.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace mirror {
+
+GcRoot<Class> ClassExt::dalvik_system_ClassExt_;
+
+ClassExt* ClassExt::Alloc(Thread* self) {
+ DCHECK(dalvik_system_ClassExt_.Read() != nullptr);
+ return down_cast<ClassExt*>(dalvik_system_ClassExt_.Read()->AllocObject(self).Ptr());
+}
+
+void ClassExt::SetVerifyError(ObjPtr<Object> err) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(ClassExt, verify_error_), err);
+ } else {
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, verify_error_), err);
+ }
+}
+
+void ClassExt::SetClass(ObjPtr<Class> dalvik_system_ClassExt) {
+ CHECK(dalvik_system_ClassExt != nullptr);
+ dalvik_system_ClassExt_ = GcRoot<Class>(dalvik_system_ClassExt);
+}
+
+void ClassExt::ResetClass() {
+ CHECK(!dalvik_system_ClassExt_.IsNull());
+ dalvik_system_ClassExt_ = GcRoot<Class>(nullptr);
+}
+
+void ClassExt::VisitRoots(RootVisitor* visitor) {
+ dalvik_system_ClassExt_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
new file mode 100644
index 0000000..35eaae1
--- /dev/null
+++ b/runtime/mirror/class_ext.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_CLASS_EXT_H_
+#define ART_RUNTIME_MIRROR_CLASS_EXT_H_
+
+#include "class-inl.h"
+
+#include "gc_root.h"
+#include "object.h"
+#include "object_callbacks.h"
+#include "string.h"
+
+namespace art {
+
+struct ClassExtOffsets;
+
+namespace mirror {
+
+// C++ mirror of dalvik.system.ClassExt
+class MANAGED ClassExt : public Object {
+ public:
+ static uint32_t ClassSize(PointerSize pointer_size) {
+ uint32_t vtable_entries = Object::kVTableLength;
+ return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
+ }
+
+ // Size of an instance of dalvik.system.ClassExt.
+ static constexpr uint32_t InstanceSize() {
+ return sizeof(ClassExt);
+ }
+
+ void SetVerifyError(ObjPtr<Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ Object* GetVerifyError() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(ClassExt, verify_error_));
+ }
+
+ static void SetClass(ObjPtr<Class> dalvik_system_ClassExt);
+ static void ResetClass();
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static ClassExt* Alloc(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
+ HeapReference<Object> verify_error_;
+
+ static GcRoot<Class> dalvik_system_ClassExt_;
+
+ friend struct art::ClassExtOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ClassExt);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_CLASS_EXT_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 262608d..b868563 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -85,6 +85,7 @@
#include "linear_alloc.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
+#include "mirror/class_ext.h"
#include "mirror/class_loader.h"
#include "mirror/emulated_stack_frame.h"
#include "mirror/field.h"
@@ -1595,6 +1596,7 @@
mirror::MethodType::VisitRoots(visitor);
mirror::MethodHandleImpl::VisitRoots(visitor);
mirror::EmulatedStackFrame::VisitRoots(visitor);
+ mirror::ClassExt::VisitRoots(visitor);
// Visit all the primitive array types classes.
mirror::PrimitiveArray<uint8_t>::VisitRoots(visitor); // BooleanArray
mirror::PrimitiveArray<int8_t>::VisitRoots(visitor); // ByteArray
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 2797d85..53d717a 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -393,7 +393,9 @@
}
ObjPtr<mirror::Class> WellKnownClasses::ToClass(jclass global_jclass) {
- return ObjPtr<mirror::Class>::DownCast(Thread::Current()->DecodeJObject(global_jclass));
+ auto ret = ObjPtr<mirror::Class>::DownCast(Thread::Current()->DecodeJObject(global_jclass));
+ DCHECK(!ret.IsNull());
+ return ret;
}
} // namespace art
diff --git a/test/480-checker-dead-blocks/src/Main.java b/test/480-checker-dead-blocks/src/Main.java
index 141054d..0ca822f 100644
--- a/test/480-checker-dead-blocks/src/Main.java
+++ b/test/480-checker-dead-blocks/src/Main.java
@@ -30,7 +30,7 @@
return false;
}
- /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$after_inlining (before)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: If
@@ -39,13 +39,13 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: <<Add:i\d+>> Add [<<ArgX>>,<<ArgY>>]
/// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: If
/// CHECK-NOT: Sub
/// CHECK-NOT: Phi
@@ -62,7 +62,7 @@
return z;
}
- /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$after_inlining (before)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: If
@@ -71,13 +71,13 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
/// CHECK-DAG: Return [<<Phi>>]
- /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
/// CHECK-DAG: <<ArgY:i\d+>> ParameterValue
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<ArgX>>,<<ArgY>>]
/// CHECK-DAG: Return [<<Sub>>]
- /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: If
/// CHECK-NOT: Add
/// CHECK-NOT: Phi
@@ -94,10 +94,10 @@
return z;
}
- /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$after_inlining (before)
/// CHECK: Mul
- /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: Mul
public static int testRemoveLoop(int x) {
@@ -109,11 +109,11 @@
return x;
}
- /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$after_inlining (before)
/// CHECK-DAG: Return
/// CHECK-DAG: Exit
- /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: Return
/// CHECK-NOT: Exit
@@ -124,15 +124,15 @@
return x;
}
- /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$after_inlining (before)
/// CHECK-DAG: If
/// CHECK-DAG: Add
- /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
- /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testDeadLoop(int) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: If
/// CHECK-NOT: Add
@@ -143,16 +143,16 @@
return x;
}
- /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$after_inlining (before)
/// CHECK-DAG: If
/// CHECK-DAG: If
/// CHECK-DAG: Add
- /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<Arg:i\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
- /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: If
/// CHECK-NOT: Add
@@ -165,13 +165,13 @@
return x;
}
- /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$after_inlining (before)
/// CHECK: SuspendCheck
/// CHECK: SuspendCheck
/// CHECK: SuspendCheck
/// CHECK-NOT: SuspendCheck
- /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination$after_inlining (after)
/// CHECK: SuspendCheck
/// CHECK: SuspendCheck
/// CHECK-NOT: SuspendCheck
diff --git a/test/485-checker-dce-loop-update/smali/TestCase.smali b/test/485-checker-dce-loop-update/smali/TestCase.smali
index e3617c7..cda6f73 100644
--- a/test/485-checker-dce-loop-update/smali/TestCase.smali
+++ b/test/485-checker-dce-loop-update/smali/TestCase.smali
@@ -23,7 +23,7 @@
.end method
-## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$after_inlining (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
@@ -36,7 +36,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<PhiX>>] loop:none
-## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination$after_inlining (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
@@ -73,7 +73,7 @@
.end method
-## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$after_inlining (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -88,7 +88,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<PhiX>>] loop:none
-## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination$after_inlining (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -129,7 +129,7 @@
.end method
-## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$after_inlining (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -146,7 +146,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<SelX>>] loop:none
-## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination$after_inlining (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -194,7 +194,7 @@
.end method
-## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$after_inlining (before)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
@@ -217,7 +217,7 @@
## CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
## CHECK-DAG: Return [<<PhiX>>] loop:none
-## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination$after_inlining (after)
## CHECK-DAG: <<ArgX:i\d+>> ParameterValue
## CHECK-DAG: <<ArgY:z\d+>> ParameterValue
## CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
diff --git a/test/485-checker-dce-switch/src/Main.java b/test/485-checker-dce-switch/src/Main.java
index 7d5fd4f..95b1a93 100644
--- a/test/485-checker-dce-switch/src/Main.java
+++ b/test/485-checker-dce-switch/src/Main.java
@@ -20,14 +20,14 @@
return 5;
}
- /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (before)
+ /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$after_inlining (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<Const100:i\d+>> IntConstant 100
/// CHECK-DAG: Return [<<Const100>>]
- /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$final (after)
+ /// CHECK-START: int Main.wholeSwitchDead(int) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: PackedSwitch
public static int wholeSwitchDead(int j) {
@@ -60,14 +60,14 @@
return l;
}
- /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (before)
+ /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$after_inlining (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (after)
+ /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
/// CHECK-DAG: Return [<<Const7>>]
- /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$final (after)
+ /// CHECK-START: int Main.constantSwitch_InRange() dead_code_elimination$after_inlining (after)
/// CHECK-NOT: PackedSwitch
public static int constantSwitch_InRange() {
@@ -96,14 +96,14 @@
return i;
}
- /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (before)
+ /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$after_inlining (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (after)
+ /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<Const15:i\d+>> IntConstant 15
/// CHECK-DAG: Return [<<Const15>>]
- /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$final (after)
+ /// CHECK-START: int Main.constantSwitch_AboveRange() dead_code_elimination$after_inlining (after)
/// CHECK-NOT: PackedSwitch
public static int constantSwitch_AboveRange() {
@@ -132,14 +132,14 @@
return i;
}
- /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (before)
+ /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$after_inlining (before)
/// CHECK-DAG: PackedSwitch
- /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (after)
+ /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$after_inlining (after)
/// CHECK-DAG: <<ConstM5:i\d+>> IntConstant -5
/// CHECK-DAG: Return [<<ConstM5>>]
- /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$final (after)
+ /// CHECK-START: int Main.constantSwitch_BelowRange() dead_code_elimination$after_inlining (after)
/// CHECK-NOT: PackedSwitch
public static int constantSwitch_BelowRange() {
diff --git a/test/543-checker-dce-trycatch/smali/TestCase.smali b/test/543-checker-dce-trycatch/smali/TestCase.smali
index 5557c7b..f50e01e 100644
--- a/test/543-checker-dce-trycatch/smali/TestCase.smali
+++ b/test/543-checker-dce-trycatch/smali/TestCase.smali
@@ -26,18 +26,18 @@
# Test a case when one entering TryBoundary is dead but the rest of the try
# block remains live.
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK: Add
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK: TryBoundary kind:entry
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK-NOT: Add
-## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadEntry(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
@@ -71,18 +71,18 @@
# Test a case when one exiting TryBoundary is dead but the rest of the try
# block remains live.
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK: Add
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK: TryBoundary kind:exit
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK-NOT: Add
-## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testDeadExit(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
@@ -117,21 +117,21 @@
# Test that a catch block remains live and consistent if some of try blocks
# throwing into it are removed.
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK: TryBoundary kind:entry
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK: TryBoundary kind:exit
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK: TryBoundary kind:entry
## CHECK-NOT: TryBoundary kind:entry
-## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testOneTryBlockDead(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK: TryBoundary kind:exit
## CHECK-NOT: TryBoundary kind:exit
@@ -203,7 +203,7 @@
# Test that DCE removes catch phi uses of instructions defined in dead try blocks.
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK-DAG: <<Arg0:i\d+>> ParameterValue
## CHECK-DAG: <<Arg1:i\d+>> ParameterValue
## CHECK-DAG: <<Const0xa:i\d+>> IntConstant 10
@@ -220,7 +220,7 @@
## CHECK-DAG: Phi [<<Add>>,<<Const0xc>>,<<Const0xe>>] reg:2 is_catch_phi:true
## CHECK-DAG: Phi [<<Select>>,<<Const0x10>>,<<Const0x11>>] reg:3 is_catch_phi:true
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedInTryBlock(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK-DAG: <<Const0xb:i\d+>> IntConstant 11
## CHECK-DAG: <<Const0xc:i\d+>> IntConstant 12
## CHECK-DAG: <<Const0xd:i\d+>> IntConstant 13
@@ -277,7 +277,7 @@
# Test that DCE does not remove catch phi uses of instructions defined outside
# dead try blocks.
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$final (before)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$after_inlining (before)
## CHECK-DAG: <<Const0xa:i\d+>> IntConstant 10
## CHECK-DAG: <<Const0xb:i\d+>> IntConstant 11
## CHECK-DAG: <<Const0xc:i\d+>> IntConstant 12
@@ -287,7 +287,7 @@
## CHECK-DAG: Phi [<<Const0xa>>,<<Const0xb>>,<<Const0xd>>] reg:1 is_catch_phi:true
## CHECK-DAG: Phi [<<Const0xf>>,<<Const0xc>>,<<Const0xe>>] reg:2 is_catch_phi:true
-## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$final (after)
+## CHECK-START: int TestCase.testCatchPhiInputs_DefinedOutsideTryBlock(int, int, int, int) dead_code_elimination$after_inlining (after)
## CHECK-DAG: <<Const0xa:i\d+>> IntConstant 10
## CHECK-DAG: <<Const0xb:i\d+>> IntConstant 11
## CHECK-DAG: <<Const0xc:i\d+>> IntConstant 12
diff --git a/test/543-checker-dce-trycatch/src/Main.java b/test/543-checker-dce-trycatch/src/Main.java
index 19587e7..0d7596a 100644
--- a/test/543-checker-dce-trycatch/src/Main.java
+++ b/test/543-checker-dce-trycatch/src/Main.java
@@ -35,10 +35,10 @@
// where TryBoundary still has exception handler successors after having removed
// some already.
- /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$final (after)
+ /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$after_inlining (after)
/// CHECK-NOT: TryBoundary
- /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$final (after)
+ /// CHECK-START: void Main.testDeadTryCatch(boolean) dead_code_elimination$after_inlining (after)
/// CHECK: begin_block
/// CHECK: begin_block
/// CHECK: begin_block
@@ -63,4 +63,4 @@
public static void main(String[] args) {
}
-}
\ No newline at end of file
+}
diff --git a/test/611-checker-simplify-if/src/Main.java b/test/611-checker-simplify-if/src/Main.java
index 7dac007..774f239 100644
--- a/test/611-checker-simplify-if/src/Main.java
+++ b/test/611-checker-simplify-if/src/Main.java
@@ -64,13 +64,13 @@
// Test when the phi is the input of the if.
- /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$final (before)
+ /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$after_inlining (before)
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: If
/// CHECK-DAG: <<Phi:i\d+>> Phi
/// CHECK-DAG: If [<<Phi>>]
- /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$final (after)
+ /// CHECK-START: void Main.testInline(java.lang.String[]) dead_code_elimination$after_inlining (after)
/// CHECK: If
/// CHECK-NOT: Phi
/// CHECK-NOT: If
diff --git a/test/620-checker-bce-intrinsics/expected.txt b/test/620-checker-bce-intrinsics/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/620-checker-bce-intrinsics/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/620-checker-bce-intrinsics/info.txt b/test/620-checker-bce-intrinsics/info.txt
new file mode 100644
index 0000000..a868845
--- /dev/null
+++ b/test/620-checker-bce-intrinsics/info.txt
@@ -0,0 +1 @@
+Test on bounds check elimination in loops using intrinsics.
diff --git a/test/620-checker-bce-intrinsics/src/Main.java b/test/620-checker-bce-intrinsics/src/Main.java
new file mode 100644
index 0000000..afc3c65
--- /dev/null
+++ b/test/620-checker-bce-intrinsics/src/Main.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests on bounds check elimination in loops that use intrinsics.
+ * All bounds checks below should be statically eliminated.
+ */
+public class Main {
+
+ /// CHECK-START: int Main.oneArray(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+ //
+ /// CHECK-START: int Main.oneArray(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int oneArray(int[] a) {
+ int x = 0;
+ for (int i = 0; i < a.length; i++) {
+ x += a[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.oneArrayAbs(int[], int) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+ //
+ /// CHECK-START: int Main.oneArrayAbs(int[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int oneArrayAbs(int[] a, int lo) {
+ int x = 0;
+ for (int i = Math.abs(lo); i < a.length; i++) {
+ x += a[i];
+ }
+ return x;
+ }
+
+
+ /// CHECK-START: int Main.twoArrays(int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.twoArrays(int[], int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int twoArrays(int[] a, int[] b) {
+ int x = 0;
+ for (int i = 0; i < Math.min(a.length, b.length); i++) {
+ x += a[i] + b[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.threeArrays(int[], int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.threeArrays(int[], int[], int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int threeArrays(int[] a, int[] b, int[] c) {
+ int x = 0;
+ for (int i = 0; i < Math.min(Math.min(a.length, b.length), c.length); i++) {
+ x += a[i] + b[i] + c[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.fourArrays(int[], int[], int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.fourArrays(int[], int[], int[], int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int fourArrays(int[] a, int[] b, int[] c, int[] d) {
+ int x = 0;
+ for (int i = 0; i < Math.min(Math.min(a.length, b.length), Math.min(c.length, d.length)); i++) {
+ x += a[i] + b[i] + c[i] + d[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.oneArrayWithCleanup(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START: int Main.oneArrayWithCleanup(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int oneArrayWithCleanup(int[] a) {
+ int x = 0;
+ int n = Math.min(4, a.length);
+ for (int i = 0; i < n; i++) {
+ x += a[i];
+ }
+ for (int i = n; i < a.length; i++) {
+ x += a[i] * 10;
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.twoArraysWithCleanup(int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START: int Main.twoArraysWithCleanup(int[], int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int twoArraysWithCleanup(int[] a, int[] b) {
+ int x = 0;
+ int n = Math.min(a.length, b.length);
+ for (int i = n - 1; i >= 0; i--) {
+ x += a[i] + b[i];
+ }
+ for (int i = n; i < a.length; i++) {
+ x += a[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.threeArraysWithCleanup(int[], int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START: int Main.threeArraysWithCleanup(int[], int[], int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int threeArraysWithCleanup(int[] a, int[] b, int[] c) {
+ int x = 0;
+ int n = Math.min(a.length, Math.min(b.length, c.length));
+ for (int i = n - 1; i >= 0; i--) {
+ x += a[i] + b[i] + c[i];
+ }
+ for (int i = n; i < a.length; i++) {
+ x += a[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.altLoopLogic(int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.altLoopLogic(int[], int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ static int altLoopLogic(int[] a, int[] b) {
+ int x = 0;
+ int n = Math.min(a.length, b.length);
+ for (int i = n; i-- > 0;) {
+ x += a[i] + b[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.hiddenMin(int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.hiddenMin(int[], int[]) BCE (after)
+ //
+ // TODO: make this so
+ static int hiddenMin(int[] a, int[] b) {
+ int x = 0;
+ for (int i = 0; i < a.length && i < b.length; i++) {
+ x += a[i] + b[i];
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.hiddenMinWithCleanup(int[], int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: BoundsCheck loop:<<Loop2:B\d+>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ /// CHECK-START: int Main.hiddenMinWithCleanup(int[], int[]) BCE (after)
+ //
+ // TODO: make this so
+ static int hiddenMinWithCleanup(int[] a, int[] b) {
+ int x = 0;
+ int i = 0;
+ for (; i < a.length && i < b.length; i++) {
+ x += a[i] + b[i];
+ }
+ for (; i < a.length; i++) {
+ x += a[i];
+ }
+ return x;
+ }
+
+ public static void main(String[] args) {
+ int[] a = { 1, 2, 3, 4, 5 };
+ int[] b = { 6, 7, 8, 9, 4, 2 };
+ int[] c = { 1, 2, 3 };
+ int[] d = { 8, 5, 3, 2 };
+
+ expectEquals(15, oneArray(a));
+ expectEquals(36, oneArray(b));
+ expectEquals(6, oneArray(c));
+ expectEquals(18, oneArray(d));
+
+ expectEquals(5, oneArrayAbs(a, -4));
+ expectEquals(15, oneArrayAbs(a, 0));
+ expectEquals(5, oneArrayAbs(a, 4));
+
+ expectEquals(30, twoArrays(a, a));
+ expectEquals(49, twoArrays(a, b));
+ expectEquals(12, twoArrays(a, c));
+ expectEquals(28, twoArrays(a, d));
+
+ expectEquals(45, threeArrays(a, a, a));
+ expectEquals(33, threeArrays(a, b, c));
+ expectEquals(58, threeArrays(a, b, d));
+ expectEquals(28, threeArrays(a, c, d));
+
+ expectEquals(60, fourArrays(a, a, a, a));
+ expectEquals(49, fourArrays(a, b, c, d));
+
+ expectEquals(60, oneArrayWithCleanup(a));
+ expectEquals(90, oneArrayWithCleanup(b));
+ expectEquals(6, oneArrayWithCleanup(c));
+ expectEquals(18, oneArrayWithCleanup(d));
+
+ expectEquals(30, twoArraysWithCleanup(a, a));
+ expectEquals(49, twoArraysWithCleanup(a, b));
+ expectEquals(21, twoArraysWithCleanup(a, c));
+ expectEquals(33, twoArraysWithCleanup(a, d));
+
+ expectEquals(45, threeArraysWithCleanup(a, a, a));
+ expectEquals(42, threeArraysWithCleanup(a, b, c));
+ expectEquals(63, threeArraysWithCleanup(a, b, d));
+ expectEquals(37, threeArraysWithCleanup(a, c, d));
+
+ expectEquals(30, altLoopLogic(a, a));
+ expectEquals(49, altLoopLogic(a, b));
+ expectEquals(12, altLoopLogic(a, c));
+ expectEquals(28, altLoopLogic(a, d));
+
+ expectEquals(30, hiddenMin(a, a));
+ expectEquals(49, hiddenMin(a, b));
+ expectEquals(12, hiddenMin(a, c));
+ expectEquals(28, hiddenMin(a, d));
+
+ expectEquals(30, hiddenMinWithCleanup(a, a));
+ expectEquals(49, hiddenMinWithCleanup(a, b));
+ expectEquals(21, hiddenMinWithCleanup(a, c));
+ expectEquals(33, hiddenMinWithCleanup(a, d));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index cc0d008..cf8db15 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -398,8 +398,15 @@
if [ "$HOST" = "n" ]; then
ISA=$(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
+ outcome=$?
if [ x"$ISA" = "x" ]; then
echo "Unable to determine architecture"
+ # Print a few things for helping diagnosing the problem.
+ echo "adb invocation outcome: $outcome"
+ echo $(adb shell ls -F /data/dalvik-cache)
+ echo $(adb shell ls /data/dalvik-cache)
+ echo ${ARCHITECTURES_PATTERN}
+ echo $(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
exit 1
fi
fi