Switch-interpreter: Use more helpers for opcode arguments.
Second half of the rename. The old VReg methods returned a variety
of integer types (sign/size), so this CL isn't necessarily trivial.
Test: diff libart.so disassembly between PS1 and PS2
Change-Id: Ic41492e4e86e950b8cc2fb9c0b5ef68b97763811
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index 7553527..4525f7c 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -547,7 +547,7 @@
if (!HandleMonitorChecks()) {
return;
}
- const size_t ref_idx = inst->VRegA_11x(inst_data);
+ const size_t ref_idx = A();
ObjPtr<mirror::Object> obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != nullptr) {
ObjPtr<mirror::Class> return_type = shadow_frame.GetMethod()->ResolveReturnType();
@@ -601,8 +601,8 @@
}
ALWAYS_INLINE void CONST_16() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint8_t dst = inst->VRegA_21s(inst_data);
- int16_t val = inst->VRegB_21s();
+ uint8_t dst = A();
+ int16_t val = B();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
shadow_frame.SetVRegReference(dst, nullptr);
@@ -611,8 +611,8 @@
}
ALWAYS_INLINE void CONST() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint8_t dst = inst->VRegA_31i(inst_data);
- int32_t val = inst->VRegB_31i();
+ uint8_t dst = A();
+ int32_t val = B();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
shadow_frame.SetVRegReference(dst, nullptr);
@@ -621,8 +621,8 @@
}
ALWAYS_INLINE void CONST_HIGH16() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint8_t dst = inst->VRegA_21h(inst_data);
- int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
+ uint8_t dst = A();
+ int32_t val = static_cast<int32_t>(B() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
shadow_frame.SetVRegReference(dst, nullptr);
@@ -631,30 +631,30 @@
}
ALWAYS_INLINE void CONST_WIDE_16() REQUIRES_SHARED(Locks::mutator_lock_) {
- shadow_frame.SetVRegLong(A(), inst->VRegB_21s());
+ shadow_frame.SetVRegLong(A(), B());
inst = inst->Next_2xx();
}
ALWAYS_INLINE void CONST_WIDE_32() REQUIRES_SHARED(Locks::mutator_lock_) {
- shadow_frame.SetVRegLong(A(), inst->VRegB_31i());
+ shadow_frame.SetVRegLong(A(), B());
inst = inst->Next_3xx();
}
ALWAYS_INLINE void CONST_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
- shadow_frame.SetVRegLong(A(), inst->VRegB_51l());
+ shadow_frame.SetVRegLong(A(), inst->WideVRegB());
inst = inst->Next_51l();
}
ALWAYS_INLINE void CONST_WIDE_HIGH16() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVRegLong(A(),
- static_cast<uint64_t>(inst->VRegB_21h()) << 48);
+ static_cast<uint64_t>(B()) << 48);
inst = inst->Next_2xx();
}
ALWAYS_INLINE void CONST_STRING() REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::String> s = ResolveString(self,
shadow_frame,
- dex::StringIndex(inst->VRegB_21c()));
+ dex::StringIndex(B()));
if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
@@ -666,7 +666,7 @@
ALWAYS_INLINE void CONST_STRING_JUMBO() REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::String> s = ResolveString(self,
shadow_frame,
- dex::StringIndex(inst->VRegB_31c()));
+ dex::StringIndex(B()));
if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
@@ -676,7 +676,7 @@
}
ALWAYS_INLINE void CONST_CLASS() REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
shadow_frame.GetMethod(),
self,
false,
@@ -692,7 +692,7 @@
ALWAYS_INLINE void CONST_METHOD_HANDLE() REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* cl = Runtime::Current()->GetClassLinker();
ObjPtr<mirror::MethodHandle> mh = cl->ResolveMethodHandle(self,
- inst->VRegB_21c(),
+ B(),
shadow_frame.GetMethod());
if (UNLIKELY(mh == nullptr)) {
HANDLE_PENDING_EXCEPTION();
@@ -705,7 +705,7 @@
ALWAYS_INLINE void CONST_METHOD_TYPE() REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* cl = Runtime::Current()->GetClassLinker();
ObjPtr<mirror::MethodType> mt = cl->ResolveMethodType(self,
- dex::ProtoIndex(inst->VRegB_21c()),
+ dex::ProtoIndex(B()),
shadow_frame.GetMethod());
if (UNLIKELY(mt == nullptr)) {
HANDLE_PENDING_EXCEPTION();
@@ -744,7 +744,7 @@
}
ALWAYS_INLINE void CHECK_CAST() REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
shadow_frame.GetMethod(),
self,
false,
@@ -763,7 +763,7 @@
}
ALWAYS_INLINE void INSTANCE_OF() REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegC_22c()),
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(C()),
shadow_frame.GetMethod(),
self,
false,
@@ -791,7 +791,7 @@
ALWAYS_INLINE void NEW_INSTANCE() REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Object> obj = nullptr;
- ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+ ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
shadow_frame.GetMethod(),
self,
false,
@@ -823,7 +823,7 @@
ALWAYS_INLINE void NEW_ARRAY() REQUIRES_SHARED(Locks::mutator_lock_) {
int32_t length = shadow_frame.GetVReg(B());
ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check>(
- dex::TypeIndex(inst->VRegC_22c()),
+ dex::TypeIndex(C()),
length,
shadow_frame.GetMethod(),
self,
@@ -851,7 +851,7 @@
}
ALWAYS_INLINE void FILL_ARRAY_DATA() REQUIRES_SHARED(Locks::mutator_lock_) {
- const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+ const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + B();
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(A());
@@ -889,7 +889,7 @@
if (!HandleAsyncException()) {
return;
}
- int8_t offset = inst->VRegA_10t(inst_data);
+ int8_t offset = A();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -899,7 +899,7 @@
if (!HandleAsyncException()) {
return;
}
- int16_t offset = inst->VRegA_20t();
+ int16_t offset = A();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -909,7 +909,7 @@
if (!HandleAsyncException()) {
return;
}
- int32_t offset = inst->VRegA_30t();
+ int32_t offset = A();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1015,7 +1015,7 @@
ALWAYS_INLINE void IF_EQ() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) ==
shadow_frame.GetVReg(B())) {
- int16_t offset = inst->VRegC_22t();
+ int16_t offset = C();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1028,7 +1028,7 @@
ALWAYS_INLINE void IF_NE() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) !=
shadow_frame.GetVReg(B())) {
- int16_t offset = inst->VRegC_22t();
+ int16_t offset = C();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1041,7 +1041,7 @@
ALWAYS_INLINE void IF_LT() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) <
shadow_frame.GetVReg(B())) {
- int16_t offset = inst->VRegC_22t();
+ int16_t offset = C();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1054,7 +1054,7 @@
ALWAYS_INLINE void IF_GE() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) >=
shadow_frame.GetVReg(B())) {
- int16_t offset = inst->VRegC_22t();
+ int16_t offset = C();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1067,7 +1067,7 @@
ALWAYS_INLINE void IF_GT() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) >
shadow_frame.GetVReg(B())) {
- int16_t offset = inst->VRegC_22t();
+ int16_t offset = C();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1080,7 +1080,7 @@
ALWAYS_INLINE void IF_LE() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) <=
shadow_frame.GetVReg(B())) {
- int16_t offset = inst->VRegC_22t();
+ int16_t offset = C();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1092,7 +1092,7 @@
ALWAYS_INLINE void IF_EQZ() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) == 0) {
- int16_t offset = inst->VRegB_21t();
+ int16_t offset = B();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1104,7 +1104,7 @@
ALWAYS_INLINE void IF_NEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) != 0) {
- int16_t offset = inst->VRegB_21t();
+ int16_t offset = B();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1116,7 +1116,7 @@
ALWAYS_INLINE void IF_LTZ() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) < 0) {
- int16_t offset = inst->VRegB_21t();
+ int16_t offset = B();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1128,7 +1128,7 @@
ALWAYS_INLINE void IF_GEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) >= 0) {
- int16_t offset = inst->VRegB_21t();
+ int16_t offset = B();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1140,7 +1140,7 @@
ALWAYS_INLINE void IF_GTZ() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) > 0) {
- int16_t offset = inst->VRegB_21t();
+ int16_t offset = B();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1152,7 +1152,7 @@
ALWAYS_INLINE void IF_LEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
if (shadow_frame.GetVReg(A()) <= 0) {
- int16_t offset = inst->VRegB_21t();
+ int16_t offset = B();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
HandleBackwardBranch(offset);
@@ -1744,37 +1744,37 @@
ALWAYS_INLINE void NEG_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(B()));
+ A(), -shadow_frame.GetVReg(B()));
inst = inst->Next_1xx();
}
ALWAYS_INLINE void NOT_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(
- inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(B()));
+ A(), ~shadow_frame.GetVReg(B()));
inst = inst->Next_1xx();
}
ALWAYS_INLINE void NEG_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVRegLong(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(B()));
+ A(), -shadow_frame.GetVRegLong(B()));
inst = inst->Next_1xx();
}
ALWAYS_INLINE void NOT_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVRegLong(
- inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(B()));
+ A(), ~shadow_frame.GetVRegLong(B()));
inst = inst->Next_1xx();
}
ALWAYS_INLINE void NEG_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVRegFloat(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(B()));
+ A(), -shadow_frame.GetVRegFloat(B()));
inst = inst->Next_1xx();
}
ALWAYS_INLINE void NEG_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVRegDouble(
- inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(B()));
+ A(), -shadow_frame.GetVRegDouble(B()));
inst = inst->Next_1xx();
}
@@ -1894,14 +1894,14 @@
}
ALWAYS_INLINE void DIV_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ bool success = DoIntDivide(shadow_frame, A(),
shadow_frame.GetVReg(B()),
shadow_frame.GetVReg(C()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
}
ALWAYS_INLINE void REM_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ bool success = DoIntRemainder(shadow_frame, A(),
shadow_frame.GetVReg(B()),
shadow_frame.GetVReg(C()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
@@ -1971,14 +1971,14 @@
}
ALWAYS_INLINE void DIV_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
- DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
+ DoLongDivide(shadow_frame, A(),
shadow_frame.GetVRegLong(B()),
shadow_frame.GetVRegLong(C()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
}
ALWAYS_INLINE void REM_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
- DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
+ DoLongRemainder(shadow_frame, A(),
shadow_frame.GetVRegLong(B()),
shadow_frame.GetVRegLong(C()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
@@ -2097,14 +2097,14 @@
}
ALWAYS_INLINE void ADD_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA, SafeAdd(shadow_frame.GetVReg(vregA),
shadow_frame.GetVReg(B())));
inst = inst->Next_1xx();
}
ALWAYS_INLINE void SUB_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
SafeSub(shadow_frame.GetVReg(vregA),
shadow_frame.GetVReg(B())));
@@ -2112,7 +2112,7 @@
}
ALWAYS_INLINE void MUL_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
SafeMul(shadow_frame.GetVReg(vregA),
shadow_frame.GetVReg(B())));
@@ -2120,21 +2120,21 @@
}
ALWAYS_INLINE void DIV_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
shadow_frame.GetVReg(B()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
}
ALWAYS_INLINE void REM_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
shadow_frame.GetVReg(B()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
}
ALWAYS_INLINE void SHL_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) <<
(shadow_frame.GetVReg(B()) & 0x1f));
@@ -2142,7 +2142,7 @@
}
ALWAYS_INLINE void SHR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) >>
(shadow_frame.GetVReg(B()) & 0x1f));
@@ -2150,7 +2150,7 @@
}
ALWAYS_INLINE void USHR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
(shadow_frame.GetVReg(B()) & 0x1f));
@@ -2158,7 +2158,7 @@
}
ALWAYS_INLINE void AND_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) &
shadow_frame.GetVReg(B()));
@@ -2166,7 +2166,7 @@
}
ALWAYS_INLINE void OR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) |
shadow_frame.GetVReg(B()));
@@ -2174,7 +2174,7 @@
}
ALWAYS_INLINE void XOR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) ^
shadow_frame.GetVReg(B()));
@@ -2182,7 +2182,7 @@
}
ALWAYS_INLINE void ADD_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
SafeAdd(shadow_frame.GetVRegLong(vregA),
shadow_frame.GetVRegLong(B())));
@@ -2190,7 +2190,7 @@
}
ALWAYS_INLINE void SUB_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
SafeSub(shadow_frame.GetVRegLong(vregA),
shadow_frame.GetVRegLong(B())));
@@ -2198,7 +2198,7 @@
}
ALWAYS_INLINE void MUL_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
SafeMul(shadow_frame.GetVRegLong(vregA),
shadow_frame.GetVRegLong(B())));
@@ -2206,21 +2206,21 @@
}
ALWAYS_INLINE void DIV_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
shadow_frame.GetVRegLong(B()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
ALWAYS_INLINE void REM_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
shadow_frame.GetVRegLong(B()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
ALWAYS_INLINE void AND_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) &
shadow_frame.GetVRegLong(B()));
@@ -2228,7 +2228,7 @@
}
ALWAYS_INLINE void OR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) |
shadow_frame.GetVRegLong(B()));
@@ -2236,7 +2236,7 @@
}
ALWAYS_INLINE void XOR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) ^
shadow_frame.GetVRegLong(B()));
@@ -2244,7 +2244,7 @@
}
ALWAYS_INLINE void SHL_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) <<
(shadow_frame.GetVReg(B()) & 0x3f));
@@ -2252,7 +2252,7 @@
}
ALWAYS_INLINE void SHR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) >>
(shadow_frame.GetVReg(B()) & 0x3f));
@@ -2260,7 +2260,7 @@
}
ALWAYS_INLINE void USHR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegLong(vregA,
static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
(shadow_frame.GetVReg(B()) & 0x3f));
@@ -2268,7 +2268,7 @@
}
ALWAYS_INLINE void ADD_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) +
shadow_frame.GetVRegFloat(B()));
@@ -2276,7 +2276,7 @@
}
ALWAYS_INLINE void SUB_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) -
shadow_frame.GetVRegFloat(B()));
@@ -2284,7 +2284,7 @@
}
ALWAYS_INLINE void MUL_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) *
shadow_frame.GetVRegFloat(B()));
@@ -2292,7 +2292,7 @@
}
ALWAYS_INLINE void DIV_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) /
shadow_frame.GetVRegFloat(B()));
@@ -2300,7 +2300,7 @@
}
ALWAYS_INLINE void REM_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegFloat(vregA,
fmodf(shadow_frame.GetVRegFloat(vregA),
shadow_frame.GetVRegFloat(B())));
@@ -2308,7 +2308,7 @@
}
ALWAYS_INLINE void ADD_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) +
shadow_frame.GetVRegDouble(B()));
@@ -2316,7 +2316,7 @@
}
ALWAYS_INLINE void SUB_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) -
shadow_frame.GetVRegDouble(B()));
@@ -2324,7 +2324,7 @@
}
ALWAYS_INLINE void MUL_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) *
shadow_frame.GetVRegDouble(B()));
@@ -2332,7 +2332,7 @@
}
ALWAYS_INLINE void DIV_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) /
shadow_frame.GetVRegDouble(B()));
@@ -2340,7 +2340,7 @@
}
ALWAYS_INLINE void REM_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
- uint4_t vregA = inst->VRegA_12x(inst_data);
+ uint4_t vregA = A();
shadow_frame.SetVRegDouble(vregA,
fmod(shadow_frame.GetVRegDouble(vregA),
shadow_frame.GetVRegDouble(B())));
@@ -2350,13 +2350,13 @@
ALWAYS_INLINE void ADD_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
SafeAdd(shadow_frame.GetVReg(B()),
- inst->VRegC_22s()));
+ C()));
inst = inst->Next_2xx();
}
ALWAYS_INLINE void RSUB_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
- SafeSub(inst->VRegC_22s(),
+ SafeSub(C(),
shadow_frame.GetVReg(B())));
inst = inst->Next_2xx();
}
@@ -2364,114 +2364,114 @@
ALWAYS_INLINE void MUL_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
SafeMul(shadow_frame.GetVReg(B()),
- inst->VRegC_22s()));
+ C()));
inst = inst->Next_2xx();
}
ALWAYS_INLINE void DIV_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
+ bool success = DoIntDivide(shadow_frame, A(),
shadow_frame.GetVReg(B()),
- inst->VRegC_22s());
+ C());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
}
ALWAYS_INLINE void REM_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
+ bool success = DoIntRemainder(shadow_frame, A(),
shadow_frame.GetVReg(B()),
- inst->VRegC_22s());
+ C());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
}
ALWAYS_INLINE void AND_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) &
- inst->VRegC_22s());
+ C());
inst = inst->Next_2xx();
}
ALWAYS_INLINE void OR_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) |
- inst->VRegC_22s());
+ C());
inst = inst->Next_2xx();
}
ALWAYS_INLINE void XOR_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) ^
- inst->VRegC_22s());
+ C());
inst = inst->Next_2xx();
}
ALWAYS_INLINE void ADD_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
- SafeAdd(shadow_frame.GetVReg(B()), inst->VRegC_22b()));
+ SafeAdd(shadow_frame.GetVReg(B()), C()));
inst = inst->Next_2xx();
}
ALWAYS_INLINE void RSUB_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
- SafeSub(inst->VRegC_22b(), shadow_frame.GetVReg(B())));
+ SafeSub(C(), shadow_frame.GetVReg(B())));
inst = inst->Next_2xx();
}
ALWAYS_INLINE void MUL_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
- SafeMul(shadow_frame.GetVReg(B()), inst->VRegC_22b()));
+ SafeMul(shadow_frame.GetVReg(B()), C()));
inst = inst->Next_2xx();
}
ALWAYS_INLINE void DIV_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(B()), inst->VRegC_22b());
+ bool success = DoIntDivide(shadow_frame, A(),
+ shadow_frame.GetVReg(B()), C());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
}
ALWAYS_INLINE void REM_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(B()), inst->VRegC_22b());
+ bool success = DoIntRemainder(shadow_frame, A(),
+ shadow_frame.GetVReg(B()), C());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
}
ALWAYS_INLINE void AND_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) &
- inst->VRegC_22b());
+ C());
inst = inst->Next_2xx();
}
ALWAYS_INLINE void OR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) |
- inst->VRegC_22b());
+ C());
inst = inst->Next_2xx();
}
ALWAYS_INLINE void XOR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) ^
- inst->VRegC_22b());
+ C());
inst = inst->Next_2xx();
}
ALWAYS_INLINE void SHL_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) <<
- (inst->VRegC_22b() & 0x1f));
+ (C() & 0x1f));
inst = inst->Next_2xx();
}
ALWAYS_INLINE void SHR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
shadow_frame.GetVReg(B()) >>
- (inst->VRegC_22b() & 0x1f));
+ (C() & 0x1f));
inst = inst->Next_2xx();
}
ALWAYS_INLINE void USHR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
shadow_frame.SetVReg(A(),
static_cast<uint32_t>(shadow_frame.GetVReg(B())) >>
- (inst->VRegC_22b() & 0x1f));
+ (C() & 0x1f));
inst = inst->Next_2xx();
}