; RUN: llc -mtriple=arm64-apple-darwin -enable-misched=0 -mcpu=cyclone < %s | FileCheck %s
; RUN: llc -mtriple=arm64-apple-darwin -enable-misched=0 -mcpu=cyclone -fast-isel -fast-isel-abort < %s | FileCheck %s

; Trivial patchpoint codegen
;
define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: trivial_patchpoint_codegen:
; CHECK: movz x16, #0xdead, lsl #32
; CHECK-NEXT: movk x16, #0xbeef, lsl #16
; CHECK-NEXT: movk x16, #0xcafe
; CHECK-NEXT: blr x16
; CHECK: movz x16, #0xdead, lsl #32
; CHECK-NEXT: movk x16, #0xbeef, lsl #16
; CHECK-NEXT: movk x16, #0xcaff
; CHECK-NEXT: blr x16
; CHECK: ret
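; The call targets below are 0xdeadbeefcafe and 0xdeadbeefcaff, which is
; exactly what the movz/movk/movk sequences above materialize into x16.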
  %resolveCall2 = inttoptr i64 244837814094590 to i8*
  %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 20, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
  %resolveCall3 = inttoptr i64 244837814094591 to i8*
  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 20, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
  ret i64 %result
}

; Caller frame metadata with stackmaps. This should not be optimized
; as a leaf function.
;
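; The alloca'd %metadata is recorded as a live value by the stackmap, so the
; function needs a real frame: the prologue and epilogue checked below must
; surround the stackmap's Ltmp label.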
; CHECK-LABEL: caller_meta_leaf
; CHECK: mov x29, sp
; CHECK-NEXT: sub sp, sp, #32
; CHECK: Ltmp
; CHECK: mov sp, x29
; CHECK: ret

define void @caller_meta_leaf() {
entry:
  %metadata = alloca i64, i32 3, align 8
  store i64 11, i64* %metadata
  store i64 12, i64* %metadata
  store i64 13, i64* %metadata
  call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
  ret void
}

; Test patchpoints reusing the same TargetConstant.
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
entry:
  %tmp80 = add i64 %tmp79, -16
  %tmp81 = inttoptr i64 %tmp80 to i64*
  %tmp82 = load i64* %tmp81, align 8
  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
  %tmp83 = load i64* %tmp33, align 8
  %tmp84 = add i64 %tmp83, -24
  %tmp85 = inttoptr i64 %tmp84 to i64*
  %tmp86 = load i64* %tmp85, align 8
  tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
  tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
  ret i64 10
}

; Test small patchpoints that don't emit calls.
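; With a null call target and a 20-byte reservation, the patchpoint should
; lower to pure padding: five 4-byte nops and no call.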
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: Ltmp
; CHECK: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: ldp
; CHECK-NEXT: ret
  %result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
  ret void
}

declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)