; RUN: llc < %s -mtriple=arm64-apple-darwin -enable-misched=0 | FileCheck %s

; Trivial patchpoint codegen
;
; The movz/movk sequence materializes the patchpoint target address
; 0xDEADBEEFCAFE (244837814094590) into the scratch register x16
; (0xDEAD = 57005, 0xBEEF = 48879, 0xCAFE = 51966) before the call.
define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: trivial_patchpoint_codegen:
; CHECK: movz x16, #57005, lsl #32
; CHECK-NEXT: movk x16, #48879, lsl #16
; CHECK-NEXT: movk x16, #51966
; CHECK-NEXT: blr x16
; CHECK: movz x16, #57005, lsl #32
; CHECK-NEXT: movk x16, #48879, lsl #16
; CHECK-NEXT: movk x16, #51967
; CHECK-NEXT: blr x16
; CHECK: ret
  %resolveCall2 = inttoptr i64 244837814094590 to i8*
  %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
  %resolveCall3 = inttoptr i64 244837814094591 to i8*
  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 20, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
  ret i64 %result
}
; Caller frame metadata with stackmaps. This should not be optimized
; as a leaf function.
;
; CHECK-LABEL: caller_meta_leaf
; CHECK: mov fp, sp
; CHECK-NEXT: sub sp, sp, #32
; CHECK: Ltmp
; CHECK: mov sp, fp
; CHECK: ret

define void @caller_meta_leaf() {
entry:
  ; Live stack slot recorded by the stackmap forces a real frame setup.
  %metadata = alloca i64, i32 3, align 8
  store i64 11, i64* %metadata
  store i64 12, i64* %metadata
  store i64 13, i64* %metadata
  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
  ret void
}
; Test the webkit_jscc calling convention.
; One argument will be passed in register, the other will be pushed on the stack.
; Return value in x0.
define void @jscall_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: jscall_patchpoint_codegen:
; CHECK: Ltmp
; CHECK: str x{{.+}}, [sp]
; CHECK-NEXT: mov x0, x{{.+}}
; CHECK: Ltmp
; CHECK-NEXT: movz x16, #65535, lsl #32
; CHECK-NEXT: movk x16, #57005, lsl #16
; CHECK-NEXT: movk x16, #48879
; CHECK-NEXT: blr x16
  %resolveCall2 = inttoptr i64 281474417671919 to i8*
  %result = tail call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* %resolveCall2, i32 2, i64 %p4, i64 %p2)
  %resolveCall3 = inttoptr i64 244837814038255 to i8*
  tail call webkit_jscc void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 6, i32 20, i8* %resolveCall3, i32 2, i64 %p4, i64 %result)
  ret void
}
; Test if the arguments are properly aligned and that we don't store undef arguments.
; Only the defined call args (2, 4, 6) are stored; the i32 at [sp, #16]
; occupies an 8-byte-aligned slot.
define i64 @jscall_patchpoint_codegen2(i64 %callee) {
entry:
; CHECK-LABEL: jscall_patchpoint_codegen2:
; CHECK: Ltmp
; CHECK: orr x{{.+}}, xzr, #0x6
; CHECK-NEXT: str x{{.+}}, [sp, #24]
; CHECK-NEXT: orr w{{.+}}, wzr, #0x4
; CHECK-NEXT: str w{{.+}}, [sp, #16]
; CHECK-NEXT: orr x{{.+}}, xzr, #0x2
; CHECK-NEXT: str x{{.+}}, [sp]
; CHECK: Ltmp
; CHECK-NEXT: movz x16, #65535, lsl #32
; CHECK-NEXT: movk x16, #57005, lsl #16
; CHECK-NEXT: movk x16, #48879
; CHECK-NEXT: blr x16
  %call = inttoptr i64 281474417671919 to i8*
  %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 6, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6)
  ret i64 %result
}
; Test if the arguments are properly aligned and that we don't store undef arguments.
; Same as codegen2, with a longer argument list (2, 4, 6, 8, 10) interleaved
; with undefs that must not be stored.
define i64 @jscall_patchpoint_codegen3(i64 %callee) {
entry:
; CHECK-LABEL: jscall_patchpoint_codegen3:
; CHECK: Ltmp
; CHECK: movz x{{.+}}, #10
; CHECK-NEXT: str x{{.+}}, [sp, #48]
; CHECK-NEXT: orr w{{.+}}, wzr, #0x8
; CHECK-NEXT: str w{{.+}}, [sp, #36]
; CHECK-NEXT: orr x{{.+}}, xzr, #0x6
; CHECK-NEXT: str x{{.+}}, [sp, #24]
; CHECK-NEXT: orr w{{.+}}, wzr, #0x4
; CHECK-NEXT: str w{{.+}}, [sp, #16]
; CHECK-NEXT: orr x{{.+}}, xzr, #0x2
; CHECK-NEXT: str x{{.+}}, [sp]
; CHECK: Ltmp
; CHECK-NEXT: movz x16, #65535, lsl #32
; CHECK-NEXT: movk x16, #57005, lsl #16
; CHECK-NEXT: movk x16, #48879
; CHECK-NEXT: blr x16
  %call = inttoptr i64 281474417671919 to i8*
  %result = call webkit_jscc i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 7, i32 20, i8* %call, i32 10, i64 %callee, i64 2, i64 undef, i32 4, i32 undef, i64 6, i32 undef, i32 8, i32 undef, i64 10)
  ret i64 %result
}
; Test patchpoints reusing the same TargetConstant.
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
entry:
  %tmp80 = add i64 %tmp79, -16
  %tmp81 = inttoptr i64 %tmp80 to i64*
  %tmp82 = load i64* %tmp81, align 8
  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
  %tmp83 = load i64* %tmp33, align 8
  %tmp84 = add i64 %tmp83, -24
  %tmp85 = inttoptr i64 %tmp84 to i64*
  %tmp86 = load i64* %tmp85, align 8
  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
  ret i64 10
}
; Test small patchpoints that don't emit calls.
; A null target with a 20-byte shadow lowers to five nops (5 * 4 bytes).
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: Ltmp
; CHECK: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: ldp
; CHECK-NEXT: ret
  %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
  ret void
}
; Test that scratch registers are spilled around patchpoints
; The inline asm clobbers everything except x16/x17, so live values must be
; moved out of the patchpoint scratch registers before the patchpoint.
; CHECK: InlineAsm End
; CHECK-NEXT: mov x{{[0-9]+}}, x16
; CHECK-NEXT: mov x{{[0-9]+}}, x17
; CHECK-NEXT: Ltmp
; CHECK-NEXT: nop
define void @clobberScratch(i32* %p) {
  %v = load i32* %p
  tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 5, i32 20, i8* null, i32 0, i32* %p, i32 %v)
  store i32 %v, i32* %p
  ret void
}
declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)