; RUN: opt -called-value-propagation -S < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnueabi"


; This test checks that we propagate the functions through arguments and attach
; !callees metadata to the call. Such metadata can enable optimizations of this
; code sequence.
;
; For example, the code below illustrates a contrived sort-like algorithm
; that accepts a pointer to a comparison function. Since the indirect call to
; the comparison function has only two targets, the call can be promoted to two
; direct calls using an if-then-else. The loop can then be unswitched and the
; called functions inlined. This essentially produces two loops, each
; specialized for one of the comparisons. A rough sketch of the promoted call
; is given after the CHECK lines below.
;
; CHECK: %tmp3 = call i1 %cmp(i64* %tmp1, i64* %tmp2), !callees ![[MD:[0-9]+]]
; CHECK: ![[MD]] = !{i1 (i64*, i64*)* @ugt, i1 (i64*, i64*)* @ule}
;
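; A rough sketch, not checked by this test: a later pass that consumes the
; !callees metadata could promote the indirect call in @arrange_data into
; direct calls guarded by a pointer comparison, for example:
;
;   %is.ugt = icmp eq i1 (i64*, i64*)* %cmp, @ugt
;   br i1 %is.ugt, label %call.ugt, label %call.ule
; call.ugt:
;   %r0 = call i1 @ugt(i64* %tmp1, i64* %tmp2)
;   br label %call.merge
; call.ule:
;   %r1 = call i1 @ule(i64* %tmp1, i64* %tmp2)
;   br label %call.merge
; call.merge:
;   %tmp3 = phi i1 [ %r0, %call.ugt ], [ %r1, %call.ule ]
;
; The block and value names above (%is.ugt, %r0, %r1, call.*) are illustrative
; only and do not appear in this file.
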
define void @test_argument(i64* %x, i64 %n, i1 %flag) {
entry:
  %tmp0 = sub i64 %n, 1
  br i1 %flag, label %then, label %else

then:
  call void @arrange_data(i64* %x, i64 %tmp0, i1 (i64*, i64*)* @ugt)
  br label %merge

else:
  call void @arrange_data(i64* %x, i64 %tmp0, i1 (i64*, i64*)* @ule)
  br label %merge

merge:
  ret void
}

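; A bubble-sort-style pass: compare adjacent elements through %cmp, swap them
; when %cmp returns true, then recurse on a prefix that is one element shorter.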
define internal void @arrange_data(i64* %x, i64 %n, i1 (i64*, i64*)* %cmp) {
entry:
  %tmp0 = icmp eq i64 %n, 1
  br i1 %tmp0, label %merge, label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %cmp.false ]
  %i.next = add nuw nsw i64 %i, 1
  %tmp1 = getelementptr inbounds i64, i64* %x, i64 %i
  %tmp2 = getelementptr inbounds i64, i64* %x, i64 %i.next
  %tmp3 = call i1 %cmp(i64* %tmp1, i64* %tmp2)
  br i1 %tmp3, label %cmp.true, label %cmp.false

cmp.true:
  call void @swap(i64* %tmp1, i64* %tmp2)
  br label %cmp.false

cmp.false:
  %cond = icmp slt i64 %i.next, %n
  br i1 %cond, label %for.body, label %for.end

for.end:
  %tmp4 = sub i64 %n, 1
  call void @arrange_data(i64* %x, i64 %tmp4, i1 (i64*, i64*)* %cmp)
  br label %merge

merge:
  ret void
}

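; The two possible callees for %cmp: unsigned greater-than and unsigned
; less-than-or-equal comparisons on the pointed-to values.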
define internal i1 @ugt(i64* %a, i64* %b) {
entry:
  %tmp0 = load i64, i64* %a
  %tmp1 = load i64, i64* %b
  %tmp2 = icmp ugt i64 %tmp0, %tmp1
  ret i1 %tmp2
}

define internal i1 @ule(i64* %a, i64* %b) {
entry:
  %tmp0 = load i64, i64* %a
  %tmp1 = load i64, i64* %b
  %tmp2 = icmp ule i64 %tmp0, %tmp1
  ret i1 %tmp2
}

declare void @swap(i64*, i64*)