Entry point clean up.

Create the set of entry points needed for image methods, avoiding fix-up at
load time (a rough sketch of the grouping follows the list):
 - interpreter - bridge to interpreter, bridge to compiled code
 - jni - dlsym lookup
 - quick - resolution and bridge to interpreter
 - portable - resolution and bridge to interpreter
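
For reference, a rough sketch of how those four per-thread tables might be
grouped; names are modeled on the list above, not the verbatim runtime
definitions:

  // Illustrative only: field names/signatures follow the list above.
  struct InterpreterEntryPoints {
    void (*pInterpreterToInterpreterBridge)();   // interpreted -> interpreted
    void (*pInterpreterToCompiledCodeBridge)();  // interpreted -> compiled
  };
  struct JniEntryPoints {
    void* (*pDlsymLookup)();  // resolve the native method on first call
  };
  struct QuickEntryPoints {
    void (*pQuickResolutionTrampoline)();  // link the callee, then call it
    void (*pQuickToInterpreterBridge)();   // fall back to the interpreter
    // ... plus the quick runtime helpers (pLockObject, pUnlockObject, ...)
  };
  struct PortableEntryPoints {
    void (*pPortableResolutionTrampoline)();
    void (*pPortableToInterpreterBridge)();
  };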

Fix the JNI workaround to use the JNI workaround argument-rewriting code
that had been accidentally disabled.
Remove the abstract method error stub; use the interpreter bridge instead.
Consolidate trampoline (previously "stub") generation into a generic helper.
Simplify trampolines to jump directly into assembly code, keeping the stack
crawlable.
Dex: replace uses of int with ThreadOffset for values that are thread
offsets (sketched below).
Tidy the entry point routines shared between interpreter, jni, quick and
portable.
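
The ThreadOffset change is a pure type-safety fix: a raw int can no longer
be passed where a thread-relative displacement is expected. A minimal
sketch, assuming the wrapper mirrors the runtime's Offset class:

  // Minimal sketch; illustrative, not the exact runtime definition.
  #include <stdint.h>
  #include <stddef.h>

  class Offset {
   public:
    explicit Offset(size_t val) : val_(val) {}
    int32_t Int32Value() const { return static_cast<int32_t>(val_); }
   private:
    size_t val_;
  };

  // Distinct type for offsets relative to the current Thread*, so an
  // object offset or a plain literal cannot be passed by mistake.
  class ThreadOffset : public Offset {
   public:
    explicit ThreadOffset(size_t val) : Offset(val) {}
  };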

Change-Id: I52a7c2bbb1b7e0ff8a3c3100b774212309d0828e
(cherry picked from commit 848871b4d8481229c32e0d048a9856e5a9a17ef9)
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 1c395de..6e3e55f 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -148,7 +148,7 @@
   NewLIR1(kX86StartOfMethod, rX86_ARG2);
   NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
   NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0,
                           rX86_ARG1, true);
 }
 
@@ -165,7 +165,7 @@
   NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
   LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
   // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
-  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObject), rCX, true);
   branch->target = NewLIR0(kPseudoTargetLabel);
 }
 
@@ -185,7 +185,7 @@
   LIR* branch2 = NewLIR1(kX86Jmp8, 0);
   branch->target = NewLIR0(kPseudoTargetLabel);
   // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
-  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rAX, true);
   branch2->target = NewLIR0(kPseudoTargetLabel);
 }
 
@@ -243,7 +243,7 @@
   if (!skip_overflow_check) {
     // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
     LIR* tgt = RawLIR(0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
-    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset());
     OpCondBranch(kCondUlt, tgt);
     // Remember branch target - will process later
     throw_launchpads_.Insert(tgt);
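
With the ThreadOffset signatures (see codegen_x86.h below), the
.Int32Value() unwrapping moves out of call sites like the stack overflow
check above and into the one place that emits the raw displacement:

  // Before: the caller unwrapped the offset, losing the type information.
  //   OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
  // After: the typed offset flows through; a bare int no longer compiles.
  OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset());
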
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index edb5ae5..21328d5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -29,7 +29,7 @@
     // Required for target - codegen helpers.
     bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
                                     RegLocation rl_dest, int lit);
-    int LoadHelper(int offset);
+    int LoadHelper(ThreadOffset offset);
     LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                   int s_reg);
@@ -154,14 +154,14 @@
     LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
     LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
     LIR* OpVldm(int rBase, int count);
     LIR* OpVstm(int rBase, int count);
     void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
     void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
-    void OpTlsCmp(int offset, int val);
+    void OpTlsCmp(ThreadOffset offset, int val);
 
-    void OpRegThreadMem(OpKind op, int r_dest, int thread_offset);
+    void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset);
     void SpillCoreRegs();
     void UnSpillCoreRegs();
     static const X86EncodingMap EncodingMap[kX86Last];
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 0b4b4be..377d134 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -240,8 +240,8 @@
   NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
 }
 
-void X86Mir2Lir::OpTlsCmp(int offset, int val) {
-  NewLIR2(kX86Cmp16TI8, offset, val);
+void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
+  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
 }
 
 bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
@@ -285,7 +285,7 @@
 
 // Test suspend flag, return target of taken suspend branch
 LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
-  OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0);
+  OpTlsCmp(Thread::ThreadFlagsOffset(), 0);
   return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
 }
 
@@ -403,7 +403,7 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) {
+void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
   case kOpCmp: opcode = kX86Cmp32RT;  break;
@@ -412,7 +412,7 @@
     LOG(FATAL) << "Bad opcode: " << op;
     break;
   }
-  NewLIR2(opcode, r_dest, thread_offset);
+  NewLIR2(opcode, r_dest, thread_offset.Int32Value());
 }
 
 /*
@@ -532,7 +532,7 @@
 
   // Get the array's class.
   LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
   LoadValueDirectFixed(rl_array, r_array);  // Reload array
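
A note on the OpTlsCmp hunk above: the displacement is now unwrapped exactly
once, at the NewLIR2 call, and the result is a single segment-relative
compare. A commented copy (the assembly in the comment is my reading of the
kX86Cmp16TI8 opcode name, not verified against the encoder):

  void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
    // Emits roughly: cmp word ptr fs:[offset], val
    // (fs holds the current Thread* on x86, so no extra register is needed.)
    NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
  }
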
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 2c9b3c8..699f3ae 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -524,7 +524,7 @@
 }
 
 // Not used in x86
-int X86Mir2Lir::LoadHelper(int offset) {
+int X86Mir2Lir::LoadHelper(ThreadOffset offset) {
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
   return INVALID_REG;
 }
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index e15995f..c519bfe 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -292,7 +292,7 @@
   return OpRegImm(op, r_dest, value);
 }
 
-LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) {
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
     case kOpBlx: opcode = kX86CallT;  break;
@@ -300,7 +300,7 @@
       LOG(FATAL) << "Bad opcode: " << op;
       break;
   }
-  return NewLIR1(opcode, thread_offset);
+  return NewLIR1(opcode, thread_offset.Int32Value());
 }
 
 LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
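
Finally, every QUICK_ENTRYPOINT_OFFSET(...) above now yields a ThreadOffset
into the per-thread quick entry point table. A hypothetical sketch of the
shape such a macro could take (the quick_entrypoints_ field name and the
macro internals are assumptions, not the actual definition):

  // Hypothetical sketch only.
  #define QUICK_ENTRYPOINT_OFFSET(entrypoint) \
      ThreadOffset(OFFSETOF_MEMBER(Thread, quick_entrypoints_) + \
                   OFFSETOF_MEMBER(QuickEntryPoints, entrypoint))

  // Usage, as in the hunks above:
  //   CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObject), rCX, true);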