Add invoke fast-path support for quickened invokes.
Quickened invokes had their own handler implementation.
Merge it into the generic DoInvoke, which already has the fast path.
This speeds up arm64 golem interpreter benchmarks by 6%.
Test: test.py -b -r --interpreter --host --64
Change-Id: Icac9e073f61df67780242877179111ed7bee7154
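
For reviewers unfamiliar with the DoInvoke template, here is a minimal,
self-contained C++ sketch of the technique being applied: a boolean template
parameter folds the specialized "quick" handler into the generic one so that
both variants share the same fast path. The names below (DoInvokeLike,
LookupFromVtableIndex, ResolveFromDexIndex) are illustrative only and do not
appear in the ART sources; the real DoInvoke is called with the parameters
shown in the hunks below.

    // Minimal sketch, not the actual ART code: a compile-time is_quick flag
    // selects how the target is found, while the dispatch fast path is shared.
    #include <cstdint>
    #include <iostream>

    struct Method { uint32_t id; };

    // Illustrative stand-ins: the generic form resolves a symbolic method index,
    // the quickened form already carries a direct table index in the instruction.
    Method ResolveFromDexIndex(uint32_t dex_idx) { return Method{dex_idx + 100}; }
    Method LookupFromVtableIndex(uint32_t vtable_idx) { return Method{vtable_idx}; }

    template <bool kIsQuick>
    bool DoInvokeLike(uint32_t operand) {
      Method target{};
      if constexpr (kIsQuick) {
        target = LookupFromVtableIndex(operand);  // quickened: index is already direct
      } else {
        target = ResolveFromDexIndex(operand);    // generic: resolve the index first
      }
      // Shared fast path: merging the handlers means the quickened variant now
      // goes through the same optimized dispatch as the generic one.
      std::cout << "invoking method " << target.id << "\n";
      return true;
    }

    int main() {
      DoInvokeLike</*kIsQuick=*/ false>(7);   // INVOKE_VIRTUAL-style call
      DoInvokeLike</*kIsQuick=*/ true>(42);   // INVOKE_VIRTUAL_QUICK-style call
    }

Because the flag is a template parameter rather than a runtime argument, the
compiler instantiates a separate copy of the handler for each variant and the
branch is resolved at compile time, so the merge adds no per-invoke cost to the
existing fast path.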
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index c430de2..3c31c38 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -1749,15 +1749,15 @@
}
case Instruction::INVOKE_VIRTUAL_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<false>(
- self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false,
+ /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<true>(
- self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false,
+ /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}