; RUN: opt < %s -inline -S | FileCheck %s

; We have to apply the less restrictive TailCallKind of the call site being
; inlined and of each call site cloned into the caller.

; No tail marker after inlining, since test_capture_c captures an alloca.
; CHECK: define void @test_capture_a(
; CHECK-NOT: tail
; CHECK: call void @test_capture_c(

declare void @test_capture_c(i32*)
define internal void @test_capture_b(i32* %P) {
  tail call void @test_capture_c(i32* %P)
  ret void
}
define void @test_capture_a() {
  %A = alloca i32 ; captured by test_capture_b
  call void @test_capture_b(i32* %A)
  ret void
}
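
; A rough sketch of the IR we expect after inlining (illustrative only; the
; checks above are what is actually verified). The cloned call loses its
; 'tail' marker because a caller-local alloca is passed to it:
;   define void @test_capture_a() {
;     %A = alloca i32
;     call void @test_capture_c(i32* %A)
;     ret void
;   }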

; No musttail marker after inlining, since the prototypes don't match.
; CHECK: define void @test_proto_mismatch_a(
; CHECK-NOT: musttail
; CHECK: call void @test_proto_mismatch_c(

declare void @test_proto_mismatch_c(i32*)
define internal void @test_proto_mismatch_b(i32* %p) {
  musttail call void @test_proto_mismatch_c(i32* %p)
  ret void
}
define void @test_proto_mismatch_a() {
  call void @test_proto_mismatch_b(i32* null)
  ret void
}
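
; Sketch of the expected result (illustrative only): musttail cannot be kept
; because @test_proto_mismatch_a's prototype does not match the callee's, so
; the cloned call is demoted:
;   define void @test_proto_mismatch_a() {
;     call void @test_proto_mismatch_c(i32* null)
;     ret void
;   }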

; After inlining through a musttail call site, we need to keep musttail markers
; to prevent unbounded stack growth.
; CHECK: define void @test_musttail_basic_a(
; CHECK: musttail call void @test_musttail_basic_c(

declare void @test_musttail_basic_c(i32* %p)
define internal void @test_musttail_basic_b(i32* %p) {
  musttail call void @test_musttail_basic_c(i32* %p)
  ret void
}
define void @test_musttail_basic_a(i32* %p) {
  musttail call void @test_musttail_basic_b(i32* %p)
  ret void
}
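
; Sketch of the expected result (illustrative only): both call sites are
; musttail, so the cloned call stays musttail and the tail-call chain is
; preserved:
;   define void @test_musttail_basic_a(i32* %p) {
;     musttail call void @test_musttail_basic_c(i32* %p)
;     ret void
;   }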

; Don't insert lifetime end markers here; the lifetime is trivially over due
; to the return.
; CHECK: define void @test_byval_a(
; CHECK: musttail call void @test_byval_c(
; CHECK-NEXT: ret void

declare void @test_byval_c(i32* byval %p)
define internal void @test_byval_b(i32* byval %p) {
  musttail call void @test_byval_c(i32* byval %p)
  ret void
}
define void @test_byval_a(i32* byval %p) {
  musttail call void @test_byval_b(i32* byval %p)
  ret void
}
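
; Sketch of the expected result (illustrative only): whatever setup the inliner
; emits for the byval argument, nothing (in particular no lifetime.end marker)
; may be placed between the cloned musttail call and the ret:
;   define void @test_byval_a(i32* byval %p) {
;     ...                                     ; any byval/lifetime setup goes here
;     musttail call void @test_byval_c(i32* byval ...)
;     ret void
;   }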

; Don't insert a stack restore; we're about to return.
; CHECK: define void @test_dynalloca_a(
; CHECK: call i8* @llvm.stacksave(
; CHECK: alloca i8, i32 %n
; CHECK: musttail call void @test_dynalloca_c(
; CHECK-NEXT: ret void

declare void @escape(i8* %buf)
declare void @test_dynalloca_c(i32* byval %p, i32 %n)
define internal void @test_dynalloca_b(i32* byval %p, i32 %n) alwaysinline {
  %buf = alloca i8, i32 %n ; dynamic alloca
  call void @escape(i8* %buf) ; escape it
  musttail call void @test_dynalloca_c(i32* byval %p, i32 %n)
  ret void
}
define void @test_dynalloca_a(i32* byval %p, i32 %n) {
  musttail call void @test_dynalloca_b(i32* byval %p, i32 %n)
  ret void
}
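
; Sketch of the expected result (illustrative only; value names are made up).
; Inlining the dynamic alloca forces a stacksave, but the matching stackrestore
; must not be placed between the musttail call and the ret:
;   define void @test_dynalloca_a(i32* byval %p, i32 %n) {
;     %savedstack = call i8* @llvm.stacksave()
;     %buf = alloca i8, i32 %n
;     call void @escape(i8* %buf)
;     musttail call void @test_dynalloca_c(i32* byval ..., i32 %n)
;     ret void
;   }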

; We can't merge the returns; each musttail call has to stay immediately
; followed by its own ret.
; CHECK: define void @test_multiret_a(
; CHECK: musttail call void @test_multiret_c(
; CHECK-NEXT: ret void
; CHECK: musttail call void @test_multiret_d(
; CHECK-NEXT: ret void

declare void @test_multiret_c(i1 zeroext %b)
declare void @test_multiret_d(i1 zeroext %b)
define internal void @test_multiret_b(i1 zeroext %b) {
  br i1 %b, label %c, label %d
c:
  musttail call void @test_multiret_c(i1 zeroext %b)
  ret void
d:
  musttail call void @test_multiret_d(i1 zeroext %b)
  ret void
}
define void @test_multiret_a(i1 zeroext %b) {
  musttail call void @test_multiret_b(i1 zeroext %b)
  ret void
}
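
; Sketch of the expected result (illustrative only; block names are made up).
; Instead of merging the two returns into one block, both cloned musttail
; calls keep their own ret:
;   define void @test_multiret_a(i1 zeroext %b) {
;     br i1 %b, label %c.i, label %d.i
;   c.i:
;     musttail call void @test_multiret_c(i1 zeroext %b)
;     ret void
;   d.i:
;     musttail call void @test_multiret_d(i1 zeroext %b)
;     ret void
;   }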

; We have to avoid building a chain of bitcasts; the cloned musttail call may
; be followed by at most one bitcast of its result before the ret.
; CHECK: define i32* @test_retptr_a(
; CHECK: musttail call i8* @test_retptr_c(
; CHECK-NEXT: bitcast i8* {{.*}} to i32*
; CHECK-NEXT: ret i32*

declare i8* @test_retptr_c()
define internal i16* @test_retptr_b() {
  %rv = musttail call i8* @test_retptr_c()
  %v = bitcast i8* %rv to i16*
  ret i16* %v
}
define i32* @test_retptr_a() {
  %rv = musttail call i16* @test_retptr_b()
  %v = bitcast i16* %rv to i32*
  ret i32* %v
}
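
; Sketch of the expected result (illustrative only; value names are made up).
; The two pointer casts are folded into a single bitcast so the musttail call
; is still followed only by a bitcast and a ret:
;   define i32* @test_retptr_a() {
;     %rv = musttail call i8* @test_retptr_c()
;     %v = bitcast i8* %rv to i32*
;     ret i32* %v
;   }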

; Combine the last two cases: multiple returns with pointer bitcasts.
; CHECK: define i32* @test_multiptrret_a(
; CHECK: musttail call i8* @test_multiptrret_c(
; CHECK-NEXT: bitcast i8* {{.*}} to i32*
; CHECK-NEXT: ret i32*
; CHECK: musttail call i8* @test_multiptrret_d(
; CHECK-NEXT: bitcast i8* {{.*}} to i32*
; CHECK-NEXT: ret i32*

declare i8* @test_multiptrret_c(i1 zeroext %b)
declare i8* @test_multiptrret_d(i1 zeroext %b)
define internal i16* @test_multiptrret_b(i1 zeroext %b) {
  br i1 %b, label %c, label %d
c:
  %c_rv = musttail call i8* @test_multiptrret_c(i1 zeroext %b)
  %c_v = bitcast i8* %c_rv to i16*
  ret i16* %c_v
d:
  %d_rv = musttail call i8* @test_multiptrret_d(i1 zeroext %b)
  %d_v = bitcast i8* %d_rv to i16*
  ret i16* %d_v
}
define i32* @test_multiptrret_a(i1 zeroext %b) {
  %rv = musttail call i16* @test_multiptrret_b(i1 zeroext %b)
  %v = bitcast i16* %rv to i32*
  ret i32* %v
}
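
; Sketch of the expected result (illustrative only; names are made up). Each
; branch keeps its own musttail call, a single folded bitcast to i32*, and
; its own ret:
;   define i32* @test_multiptrret_a(i1 zeroext %b) {
;     br i1 %b, label %c.i, label %d.i
;   c.i:
;     %c_rv = musttail call i8* @test_multiptrret_c(i1 zeroext %b)
;     %c_v = bitcast i8* %c_rv to i32*
;     ret i32* %c_v
;   d.i:
;     %d_rv = musttail call i8* @test_multiptrret_d(i1 zeroext %b)
;     %d_v = bitcast i8* %d_rv to i32*
;     ret i32* %d_v
;   }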

; Inline through a musttail call site whose callee contains both a normal
; return and a musttail call.
; CHECK: define i32 @test_mixedret_a(
; CHECK: br i1 %b
; CHECK: musttail call i32 @test_mixedret_c(
; CHECK-NEXT: ret i32
; CHECK: call i32 @test_mixedret_d(i1 zeroext %b)
; CHECK: add i32 1,
; CHECK-NOT: br
; CHECK: ret i32

declare i32 @test_mixedret_c(i1 zeroext %b)
declare i32 @test_mixedret_d(i1 zeroext %b)
define internal i32 @test_mixedret_b(i1 zeroext %b) {
  br i1 %b, label %c, label %d
c:
  %c_rv = musttail call i32 @test_mixedret_c(i1 zeroext %b)
  ret i32 %c_rv
d:
  %d_rv = call i32 @test_mixedret_d(i1 zeroext %b)
  %d_rv1 = add i32 1, %d_rv
  ret i32 %d_rv1
}
define i32 @test_mixedret_a(i1 zeroext %b) {
  %rv = musttail call i32 @test_mixedret_b(i1 zeroext %b)
  ret i32 %rv
}
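
; Sketch of the expected result (illustrative only; names are made up). The
; musttail path returns directly; the normal path computes its value and
; returns it without branching to a separate merge block:
;   define i32 @test_mixedret_a(i1 zeroext %b) {
;     br i1 %b, label %c.i, label %d.i
;   c.i:
;     %c_rv = musttail call i32 @test_mixedret_c(i1 zeroext %b)
;     ret i32 %c_rv
;   d.i:
;     %d_rv = call i32 @test_mixedret_d(i1 zeroext %b)
;     %d_rv1 = add i32 1, %d_rv
;     ret i32 %d_rv1
;   }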