//===-- X86JITInfo.cpp - Implement the JIT interfaces for the X86 target --===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JIT interfaces for the X86 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "X86JITInfo.h"
#include "X86Relocations.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
#include "llvm/Config/alloca.h"
#include <cstdlib>
using namespace llvm;

#ifdef _MSC_VER
  extern "C" void *_AddressOfReturnAddress(void);
  #pragma intrinsic(_AddressOfReturnAddress)
#endif

void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
  unsigned char *OldByte = (unsigned char *)Old;
  *OldByte++ = 0xE9;                // Emit JMP opcode.
  unsigned *OldWord = (unsigned *)OldByte;
  unsigned NewAddr = (intptr_t)New;
  unsigned OldAddr = (intptr_t)OldWord;
  *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.
}
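
// Illustrative example (made-up addresses, not from this file): patching
// Old = 0x1000 to jump to New = 0x2000 writes the five bytes E9 FB 0F 00 00,
// i.e. the 0xE9 JMP opcode followed by the little-endian rel32 displacement
// 0x2000 - 0x1001 - 4 = 0xFFB, measured from the end of the instruction.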

/// JITCompilerFunction - This contains the address of the JIT function used to
/// compile a function lazily.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;

// Get the ASMPREFIX for the current host.  This is often '_'.
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__
#endif
#define GETASMPREFIX2(X) #X
#define GETASMPREFIX(X) GETASMPREFIX2(X)
#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)
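// For example, on Darwin __USER_LABEL_PREFIX__ is '_', so ASMPREFIX expands to
// "_" and the symbol below is emitted as _X86CompilationCallback; on most ELF
// hosts the prefix is empty.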

// Provide a wrapper for X86CompilationCallback2 that saves non-traditional
// callee-saved registers, for the fastcc calling convention.
extern "C" {
#if defined(__x86_64__)
  // No need to save EAX/EDX for X86-64.
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
  ASMPREFIX "X86CompilationCallback:\n"
    ".cfi_startproc\n"
    // Save RBP
    "pushq %rbp\n"
    ".cfi_def_cfa_offset 16\n"
    ".cfi_offset %rbp, -16\n"
    // Save RSP
    "movq %rsp, %rbp\n"
    ".cfi_def_cfa_register %rbp\n"
    // Save all int arg registers
    "pushq %rdi\n"
    ".cfi_rel_offset %rdi, 0\n"
    "pushq %rsi\n"
    ".cfi_rel_offset %rsi, 8\n"
    "pushq %rdx\n"
    ".cfi_rel_offset %rdx, 16\n"
    "pushq %rcx\n"
    ".cfi_rel_offset %rcx, 24\n"
    "pushq %r8\n"
    ".cfi_rel_offset %r8, 32\n"
    "pushq %r9\n"
    ".cfi_rel_offset %r9, 40\n"
    // Align stack on 16-byte boundary.  RSP might not be properly aligned
    // (8 byte) if this is called from an indirect stub.
    "andq $-16, %rsp\n"
    // Save all XMM arg registers
    "subq $128, %rsp\n"
    "movaps %xmm0, (%rsp)\n"
    "movaps %xmm1, 16(%rsp)\n"
    "movaps %xmm2, 32(%rsp)\n"
    "movaps %xmm3, 48(%rsp)\n"
    "movaps %xmm4, 64(%rsp)\n"
    "movaps %xmm5, 80(%rsp)\n"
    "movaps %xmm6, 96(%rsp)\n"
    "movaps %xmm7, 112(%rsp)\n"
    // JIT callee
    "movq %rbp, %rdi\n"    // Pass prev frame and return address
    "movq 8(%rbp), %rsi\n"
    "call " ASMPREFIX "X86CompilationCallback2\n"
    // Restore all XMM arg registers
    "movaps 112(%rsp), %xmm7\n"
    "movaps 96(%rsp), %xmm6\n"
    "movaps 80(%rsp), %xmm5\n"
    "movaps 64(%rsp), %xmm4\n"
    "movaps 48(%rsp), %xmm3\n"
    "movaps 32(%rsp), %xmm2\n"
    "movaps 16(%rsp), %xmm1\n"
    "movaps (%rsp), %xmm0\n"
    // Restore RSP
    "movq %rbp, %rsp\n"
    ".cfi_def_cfa_register %rsp\n"
    // Restore all int arg registers
    "subq $48, %rsp\n"
    ".cfi_adjust_cfa_offset 48\n"
    "popq %r9\n"
    ".cfi_adjust_cfa_offset -8\n"
    ".cfi_restore %r9\n"
    "popq %r8\n"
    ".cfi_adjust_cfa_offset -8\n"
    ".cfi_restore %r8\n"
    "popq %rcx\n"
    ".cfi_adjust_cfa_offset -8\n"
    ".cfi_restore %rcx\n"
    "popq %rdx\n"
    ".cfi_adjust_cfa_offset -8\n"
    ".cfi_restore %rdx\n"
    "popq %rsi\n"
    ".cfi_adjust_cfa_offset -8\n"
    ".cfi_restore %rsi\n"
    "popq %rdi\n"
    ".cfi_adjust_cfa_offset -8\n"
    ".cfi_restore %rdi\n"
    // Restore RBP
    "popq %rbp\n"
    ".cfi_adjust_cfa_offset -8\n"
    ".cfi_restore %rbp\n"
    "ret\n"
    ".cfi_endproc\n");
#elif defined(__i386__) || defined(i386) || defined(_M_IX86)
#ifndef _MSC_VER
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
  ASMPREFIX "X86CompilationCallback:\n"
    ".cfi_startproc\n"
    "pushl %ebp\n"
    ".cfi_def_cfa_offset 8\n"
    ".cfi_offset %ebp, -8\n"
    "movl %esp, %ebp\n"    // Standard prologue
    ".cfi_def_cfa_register %ebp\n"
    "pushl %eax\n"
    ".cfi_rel_offset %eax, 0\n"
    "pushl %edx\n"         // Save EAX/EDX/ECX
    ".cfi_rel_offset %edx, 4\n"
    "pushl %ecx\n"
    ".cfi_rel_offset %ecx, 8\n"
#if defined(__APPLE__)
    "andl $-16, %esp\n"    // Align ESP on 16-byte boundary
#endif
    "subl $16, %esp\n"
    "movl 4(%ebp), %eax\n" // Pass prev frame and return address
    "movl %eax, 4(%esp)\n"
    "movl %ebp, (%esp)\n"
    "call " ASMPREFIX "X86CompilationCallback2\n"
    "movl %ebp, %esp\n"    // Restore ESP
    ".cfi_def_cfa_register %esp\n"
    "subl $12, %esp\n"
    ".cfi_adjust_cfa_offset 12\n"
    "popl %ecx\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %ecx\n"
    "popl %edx\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %edx\n"
    "popl %eax\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %eax\n"
    "popl %ebp\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %ebp\n"
    "ret\n"
    ".cfi_endproc\n");

  // Same as X86CompilationCallback but also saves XMM argument registers.
  void X86CompilationCallback_SSE(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback_SSE\n"
  ASMPREFIX "X86CompilationCallback_SSE:\n"
    ".cfi_startproc\n"
    "pushl %ebp\n"
    ".cfi_def_cfa_offset 8\n"
    ".cfi_offset %ebp, -8\n"
    "movl %esp, %ebp\n"    // Standard prologue
    ".cfi_def_cfa_register %ebp\n"
    "pushl %eax\n"
    ".cfi_rel_offset %eax, 0\n"
    "pushl %edx\n"         // Save EAX/EDX/ECX
    ".cfi_rel_offset %edx, 4\n"
    "pushl %ecx\n"
    ".cfi_rel_offset %ecx, 8\n"
    "andl $-16, %esp\n"    // Align ESP on 16-byte boundary
    // Save all XMM arg registers
    "subl $64, %esp\n"
    // FIXME: provide frame move information for xmm registers.
    // This can be tricky, because CFA register is ebp (unaligned)
    // and we need to produce offsets relative to it.
    "movaps %xmm0, (%esp)\n"
    "movaps %xmm1, 16(%esp)\n"
    "movaps %xmm2, 32(%esp)\n"
    "movaps %xmm3, 48(%esp)\n"
    "subl $16, %esp\n"
    "movl 4(%ebp), %eax\n" // Pass prev frame and return address
    "movl %eax, 4(%esp)\n"
    "movl %ebp, (%esp)\n"
    "call " ASMPREFIX "X86CompilationCallback2\n"
    "addl $16, %esp\n"
    "movaps 48(%esp), %xmm3\n"
    ".cfi_restore %xmm3\n"
    "movaps 32(%esp), %xmm2\n"
    ".cfi_restore %xmm2\n"
    "movaps 16(%esp), %xmm1\n"
    ".cfi_restore %xmm1\n"
    "movaps (%esp), %xmm0\n"
    ".cfi_restore %xmm0\n"
    "movl %ebp, %esp\n"    // Restore ESP
    ".cfi_def_cfa_register %esp\n"
    "subl $12, %esp\n"
    ".cfi_adjust_cfa_offset 12\n"
    "popl %ecx\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %ecx\n"
    "popl %edx\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %edx\n"
    "popl %eax\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %eax\n"
    "popl %ebp\n"
    ".cfi_adjust_cfa_offset -4\n"
    ".cfi_restore %ebp\n"
    "ret\n"
    ".cfi_endproc\n");
#else
  void X86CompilationCallback2(void);

  _declspec(naked) void X86CompilationCallback(void) {
    __asm {
      push eax
      push edx
      push ecx
      call X86CompilationCallback2
      pop ecx
      pop edx
      pop eax
      ret
    }
  }
#endif // _MSC_VER

#else // Not an i386 host
  void X86CompilationCallback() {
    assert(0 && "Cannot call X86CompilationCallback() on a non-x86 arch!\n");
    abort();
  }
#endif
}

/// X86CompilationCallback2 - This is the target-specific function invoked by
/// the function stub when we did not know the real target of a call.  This
/// function must locate the start of the stub or call site and pass it into
/// the JIT compiler function.
#ifdef _MSC_VER
extern "C" void X86CompilationCallback2() {
  assert(sizeof(size_t) == 4); // FIXME: handle Win64
  intptr_t *RetAddrLoc = (intptr_t *)_AddressOfReturnAddress();
  RetAddrLoc += 4;  // skip over ret addr, edx, eax, ecx
  intptr_t RetAddr = *RetAddrLoc;
#else
extern "C" void X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
  intptr_t *RetAddrLoc = &StackPtr[1];
#endif
  assert(*RetAddrLoc == RetAddr &&
         "Could not find return address on the stack!");

  // It's a stub if there is an interrupt marker after the call.
  bool isStub = ((unsigned char*)RetAddr)[0] == 0xCD;

  // The call instruction should have pushed the return address onto the
  // stack...
#ifdef __x86_64__
  RetAddr--;     // Backtrack to the reference itself...
#else
  RetAddr -= 4;  // Backtrack to the reference itself...
#endif
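
  // Stub layout cheat sheet (see emitFunctionStub below): on x86-64 a lazy
  // stub is "49 BA <imm64> 41 FF D2 CD", i.e. movabsq $Fn, %r10; callq *%r10;
  // 0xCD marker.  The pushed return address points at the 0xCD byte, so
  // backing up one byte lands on the ModRM byte and RetAddr - 0xa (used
  // below) is the imm64 field.  On i386 a stub is "E8 <rel32> CD", so backing
  // up four bytes lands on the rel32 field.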

#if 0
  DOUT << "In callback! Addr=" << (void*)RetAddr
       << " ESP=" << (void*)StackPtr
       << ": Resolving call to function: "
       << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n";
#endif

  // Sanity check to make sure this really is a call instruction.
#ifdef __x86_64__
  assert(((unsigned char*)RetAddr)[-2] == 0x41 && "Not a call instr!");
  assert(((unsigned char*)RetAddr)[-1] == 0xFF && "Not a call instr!");
#else
  assert(((unsigned char*)RetAddr)[-1] == 0xE8 && "Not a call instr!");
#endif

  intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)RetAddr);

  // Rewrite the call target... so that we don't end up here every time we
  // execute the call.
#ifdef __x86_64__
  *(intptr_t *)(RetAddr - 0xa) = NewVal;
#else
  *(intptr_t *)RetAddr = (intptr_t)(NewVal-RetAddr-4);
#endif

  if (isStub) {
    // If this is a stub, rewrite the call into an unconditional branch
    // instruction so that two return addresses are not pushed onto the stack
    // when the requested function finally gets called.  This also makes the
    // 0xCD byte (interrupt) dead, so the marker doesn't affect anything.
#ifdef __x86_64__
    // ModRM 0xE2 turns "callq *%r10" into "jmpq *%r10" (/2 -> /4).
    ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
#else
    ((unsigned char*)RetAddr)[-1] = 0xE9; // jmp rel32
#endif
  }

  // Change the return address to reexecute the call instruction...
#ifdef __x86_64__
  *RetAddrLoc -= 0xd;  // movabsq + callq *%r10 is 13 bytes
#else
  *RetAddrLoc -= 5;    // call rel32 is 5 bytes
#endif
}

TargetJITInfo::LazyResolverFn
X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
  JITCompilerFunction = F;

#if (defined(__i386__) || defined(i386) || defined(_M_IX86)) && \
    !defined(_MSC_VER) && !defined(__x86_64__)
  unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
  union {
    unsigned u[3];
    char c[12];
  } text;

  // The CPUID vendor string comes back in EBX:EDX:ECX order, hence the
  // shuffled pointers below.
  if (!X86::GetCpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1)) {
    // FIXME: support for AMD family of processors.
    if (memcmp(text.c, "GenuineIntel", 12) == 0) {
      X86::GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);
      // EDX bit 25 from CPUID leaf 1 is the SSE feature flag.
      if ((EDX >> 25) & 0x1)
        return X86CompilationCallback_SSE;
    }
  }
#endif

  return X86CompilationCallback;
}

void *X86JITInfo::emitFunctionStub(void *Fn, MachineCodeEmitter &MCE) {
  // Note, we cast to intptr_t here to silence a -pedantic warning that
  // complains about casting a function pointer to a normal pointer.
#if (defined(__i386__) || defined(i386) || defined(_M_IX86)) && \
    !defined(_MSC_VER) && !defined(__x86_64__)
  bool NotCC = (Fn != (void*)(intptr_t)X86CompilationCallback &&
                Fn != (void*)(intptr_t)X86CompilationCallback_SSE);
#else
  bool NotCC = Fn != (void*)(intptr_t)X86CompilationCallback;
#endif
  if (NotCC) {
#ifdef __x86_64__
    MCE.startFunctionStub(13, 4);
    MCE.emitByte(0x49);          // REX prefix
    MCE.emitByte(0xB8+2);        // movabsq r10
    MCE.emitWordLE(((unsigned *)&Fn)[0]);
    MCE.emitWordLE(((unsigned *)&Fn)[1]);
    MCE.emitByte(0x41);          // REX prefix
    MCE.emitByte(0xFF);          // jmpq *r10
    MCE.emitByte(2 | (4 << 3) | (3 << 6)); // ModRM 0xE2: /4 = JMP, rm = r10
#else
    MCE.startFunctionStub(5, 4);
    MCE.emitByte(0xE9);
    MCE.emitWordLE((intptr_t)Fn-MCE.getCurrentPCValue()-4);
#endif
    return MCE.finishFunctionStub(0);
  }

#ifdef __x86_64__
  MCE.startFunctionStub(14, 4);
  MCE.emitByte(0x49);          // REX prefix
  MCE.emitByte(0xB8+2);        // movabsq r10
  MCE.emitWordLE(((unsigned *)&Fn)[0]);
  MCE.emitWordLE(((unsigned *)&Fn)[1]);
  MCE.emitByte(0x41);          // REX prefix
  MCE.emitByte(0xFF);          // callq *r10
  MCE.emitByte(2 | (2 << 3) | (3 << 6)); // ModRM 0xD2: /2 = CALL, rm = r10
#else
  MCE.startFunctionStub(6, 4);
  MCE.emitByte(0xE8);   // Call with 32 bit pc-rel destination...

  MCE.emitWordLE((intptr_t)Fn-MCE.getCurrentPCValue()-4);
#endif

  MCE.emitByte(0xCD);   // Interrupt - Just a marker identifying the stub!
  return MCE.finishFunctionStub(0);
}
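
// Illustrative arithmetic for the i386 rel32 fields above (made-up addresses):
// after emitting the opcode byte, getCurrentPCValue() points at the
// displacement field, so a stub at 0x3000 calling Fn at 0x4000 gets the word
// 0x4000 - 0x3001 - 4 = 0xFFB, i.e. the target relative to the end of the
// 5-byte call.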

/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
                          unsigned NumRelocs, unsigned char* GOTBase) {
  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
    void *RelocPos = (char*)Function + MR->getMachineCodeOffset();
    intptr_t ResultPtr = (intptr_t)MR->getResultPointer();
    switch ((X86::RelocationType)MR->getRelocationType()) {
    case X86::reloc_pcrel_word: {
      // PC relative relocation, add the relocated value to the value already
      // in memory, after we adjust it for where the PC is.
      ResultPtr = ResultPtr-(intptr_t)RelocPos-4-MR->getConstantVal();
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    }
    case X86::reloc_absolute_word:
      // Absolute relocation, just add the relocated value to the value already
      // in memory.
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    case X86::reloc_absolute_dword:
      *((intptr_t*)RelocPos) += ResultPtr;
      break;
    }
  }
}
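
// Worked example for reloc_pcrel_word (made-up addresses): a displacement
// field at RelocPos = 0x1000 resolving to a symbol at 0x2000 with a zero
// ConstantVal gets 0x2000 - 0x1000 - 4 = 0xFFC added to whatever addend is
// already in the field, since the CPU computes pc-relative targets from the
// end of the 4-byte field.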