; RUN: opt < %s -inline -S | FileCheck %s

; Inlining a byval struct should cause an explicit copy into an alloca.

%struct.ss = type { i32, i64 }
@.str = internal constant [10 x i8] c"%d, %lld\0A\00"  ; <[10 x i8]*> [#uses=1]

define internal void @f(%struct.ss* byval %b) nounwind {
entry:
  %tmp = getelementptr %struct.ss* %b, i32 0, i32 0  ; <i32*> [#uses=2]
  %tmp1 = load i32* %tmp, align 4  ; <i32> [#uses=1]
  %tmp2 = add i32 %tmp1, 1  ; <i32> [#uses=1]
  store i32 %tmp2, i32* %tmp, align 4
  ret void
}

declare i32 @printf(i8*, ...) nounwind

define i32 @test1() nounwind {
entry:
  %S = alloca %struct.ss  ; <%struct.ss*> [#uses=4]
  %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0  ; <i32*> [#uses=1]
  store i32 1, i32* %tmp1, align 8
  %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1  ; <i64*> [#uses=1]
  store i64 2, i64* %tmp4, align 4
  call void @f( %struct.ss* byval %S ) nounwind
  ret i32 0
; CHECK: @test1()
; CHECK: %b = alloca %struct.ss
; CHECK: call void @llvm.memcpy
; CHECK: ret i32 0
}
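
; A rough sketch (not matched verbatim by the CHECK lines above; names and
; exact operand details are illustrative) of the IR the inliner is expected to
; produce for @test1: the byval argument gets its own alloca, the caller's %S
; is copied into it with llvm.memcpy, and the inlined body of @f then mutates
; only that copy, preserving byval pass-by-value semantics.
;
;   define i32 @test1() nounwind {
;   entry:
;     %S = alloca %struct.ss
;     %b = alloca %struct.ss
;     ... stores of 1 and 2 into %S as above ...
;     call void @llvm.memcpy(... copy %S into %b ...)
;     %tmp.i = getelementptr %struct.ss* %b, i32 0, i32 0
;     %tmp1.i = load i32* %tmp.i, align 4
;     %tmp2.i = add i32 %tmp1.i, 1
;     store i32 %tmp2.i, i32* %tmp.i, align 4
;     ret i32 0
;   }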

; Inlining a byval struct should NOT cause an explicit copy
; into an alloca if the function is readonly.

define internal i32 @f2(%struct.ss* byval %b) nounwind readonly {
entry:
  %tmp = getelementptr %struct.ss* %b, i32 0, i32 0  ; <i32*> [#uses=2]
  %tmp1 = load i32* %tmp, align 4  ; <i32> [#uses=1]
  %tmp2 = add i32 %tmp1, 1  ; <i32> [#uses=1]
  ret i32 %tmp2
}

define i32 @test2() nounwind {
entry:
  %S = alloca %struct.ss  ; <%struct.ss*> [#uses=4]
  %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0  ; <i32*> [#uses=1]
  store i32 1, i32* %tmp1, align 8
  %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1  ; <i64*> [#uses=1]
  store i64 2, i64* %tmp4, align 4
  %X = call i32 @f2( %struct.ss* byval %S ) nounwind
  ret i32 %X
; CHECK: @test2()
; CHECK: %S = alloca %struct.ss
; CHECK-NOT: call void @llvm.memcpy
; CHECK: ret i32
}
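
; A rough sketch (illustrative, not matched verbatim) of the expected result
; for @test2: because @f2 is readonly it cannot modify the byval memory, so the
; inliner may let the inlined body read the caller's %S directly, and no
; temporary alloca or llvm.memcpy is needed.
;
;   define i32 @test2() nounwind {
;   entry:
;     %S = alloca %struct.ss
;     ... stores of 1 and 2 into %S as above ...
;     %tmp.i = getelementptr %struct.ss* %S, i32 0, i32 0
;     %tmp1.i = load i32* %tmp.i, align 4
;     %tmp2.i = add i32 %tmp1.i, 1
;     ret i32 %tmp2.i
;   }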


; Inlining a byval with an explicit alignment needs to use *at least* that
; alignment on the generated alloca.
; PR8769
declare void @g3(%struct.ss* %p)

define internal void @f3(%struct.ss* byval align 64 %b) nounwind {
  call void @g3(%struct.ss* %b)  ;; Could make alignment assumptions!
  ret void
}

define void @test3() nounwind {
entry:
  %S = alloca %struct.ss, align 1  ;; May not be aligned.
  call void @f3( %struct.ss* byval align 64 %S) nounwind
  ret void
; CHECK: @test3()
; CHECK: %b = alloca %struct.ss, align 64
; CHECK: %S = alloca %struct.ss
; CHECK: call void @llvm.memcpy
; CHECK: call void @g3(%struct.ss* %b)
; CHECK: ret void
}
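
; A rough sketch (illustrative, not matched verbatim) of the expected result
; for @test3: the alloca generated for the byval copy must carry at least the
; align 64 promised to @f3, because the inlined call to @g3 may rely on that
; alignment, while the caller's own %S keeps its align 1.
;
;   define void @test3() nounwind {
;   entry:
;     %b = alloca %struct.ss, align 64
;     %S = alloca %struct.ss, align 1
;     call void @llvm.memcpy(... copy %S into %b ...)
;     call void @g3(%struct.ss* %b)
;     ret void
;   }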