; RUN: llc < %s -mcpu=cortex-a9 -new-coalescer | FileCheck %s
; Regression tests for the (new) register coalescer on ARM NEON vld2/vst2
; code: each function checks whether vector copies (vorr/vmov) between a
; vld2 result and a vst2 operand are eliminated.
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios0.0.0"

; CHECK: f
; The vld2 and vst2 are not aligned wrt each other, the second Q loaded is the
; first one stored.
; The coalescer must find a super-register larger than QQ to eliminate the copy
; setting up the vst2 data.
; CHECK: vld2
; CHECK-NOT: vorr
; CHECK-NOT: vmov
; CHECK: vst2
; Load two Q registers with vld2, then store with vst2 so that the SECOND
; loaded Q becomes the FIRST stored operand (the other store operand is undef).
; This misalignment forces the coalescer to use a register tuple wider than QQ
; to avoid a copy (checked by the CHECK-NOT lines above).
define void @f(float* %p, i32 %c) nounwind ssp {
entry:
  %0 = bitcast float* %p to i8*
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
  ; Keep only element 1 of the vld2 result pair.
  %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  ; p + 8 floats: store destination does not overlap the load source.
  %add.ptr = getelementptr inbounds float* %p, i32 8
  %1 = bitcast float* %add.ptr to i8*
  tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> undef, i32 4)
  ret void
}
| 24 | |
; CHECK: f1
; FIXME: This function still has copies.
; The vst2 data mixes results from two separate vld2 calls: element 1 of the
; first load and element 0 of the second load, so a simple pairwise coalescing
; of either vld2 result with the vst2 operands is not sufficient.
define void @f1(float* %p, i32 %c) nounwind ssp {
entry:
  %0 = bitcast float* %p to i8*
  ; First vld2 at p; keep element 1.
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
  %vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  %add.ptr = getelementptr inbounds float* %p, i32 8
  %1 = bitcast float* %add.ptr to i8*
  ; Second vld2 at p+8; keep element 0.
  %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
  %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
  ; Store (first-load elt 1, second-load elt 0) back to p+8.
  tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> %vld2215, i32 4)
  ret void
}
| 39 | |
; CHECK: f2
; FIXME: This function still has copies.
; Loop version of f1: the first vst2 operand is a loop-carried phi (element 1
; of the previous iteration's vld2), so the copy the coalescer would have to
; remove spans the back-edge of the loop.
define void @f2(float* %p, i32 %c) nounwind ssp {
entry:
  %0 = bitcast float* %p to i8*
  ; Initial load at p seeds the loop-carried value with element 1.
  %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
  %vld224 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
  br label %do.body

do.body:                                          ; preds = %do.body, %entry
  ; %qq0.0.1.0 carries element 1 of the previous iteration's vld2.
  %qq0.0.1.0 = phi <4 x float> [ %vld224, %entry ], [ %vld2216, %do.body ]
  %c.addr.0 = phi i32 [ %c, %entry ], [ %dec, %do.body ]
  %p.addr.0 = phi float* [ %p, %entry ], [ %add.ptr, %do.body ]
  ; Advance the pointer by 8 floats each iteration.
  %add.ptr = getelementptr inbounds float* %p.addr.0, i32 8
  %1 = bitcast float* %add.ptr to i8*
  %vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
  %vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
  %vld2216 = extractvalue { <4 x float>, <4 x float> } %vld22, 1
  ; Store (previous iteration's elt 1, this iteration's elt 0).
  tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %qq0.0.1.0, <4 x float> %vld2215, i32 4)
  ; Count down %c iterations (do-while: body runs at least once).
  %dec = add nsw i32 %c.addr.0, -1
  %tobool = icmp eq i32 %dec, 0
  br i1 %tobool, label %do.end, label %do.body

do.end:                                           ; preds = %do.body
  ret void
}
| 66 | |
; NEON structured load/store intrinsics: (pointer, [data operands,] alignment).
declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32) nounwind readonly
declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
Jakob Stoklund Olesen | 87f7864 | 2012-09-17 23:03:25 +0000 | [diff] [blame] | 69 | |
; CHECK: f3
; This function has lane insertions that span basic blocks.
; The trivial REG_SEQUENCE lowering can't handle that, but the coalescer can.
;
; void f3(float *p, float *q) {
;   float32x2_t x;
;   x[1] = p[3];
;   if (q)
;     x[0] = q[0] + q[1];
;   else
;     x[0] = p[2];
;   vst1_f32(p+4, x);
; }
;
; CHECK-NOT: vmov
; CHECK-NOT: vorr
define void @f3(float* %p, float* %q) nounwind ssp {
entry:
  ; x[1] = p[3], inserted into lane 1 of an otherwise-undef <2 x float>.
  %arrayidx = getelementptr inbounds float* %p, i32 3
  %0 = load float* %arrayidx, align 4
  %vecins = insertelement <2 x float> undef, float %0, i32 1
  %tobool = icmp eq float* %q, null
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  ; x[0] = q[0] + q[1] — lane 0 inserted in a different block than lane 1.
  %1 = load float* %q, align 4
  %arrayidx2 = getelementptr inbounds float* %q, i32 1
  %2 = load float* %arrayidx2, align 4
  %add = fadd float %1, %2
  %vecins3 = insertelement <2 x float> %vecins, float %add, i32 0
  br label %if.end

if.else:                                          ; preds = %entry
  ; x[0] = p[2] — alternate lane-0 insertion on the else path.
  %arrayidx4 = getelementptr inbounds float* %p, i32 2
  %3 = load float* %arrayidx4, align 4
  %vecins5 = insertelement <2 x float> %vecins, float %3, i32 0
  br label %if.end

if.end:                                           ; preds = %if.else, %if.then
  ; Merge the two lane-0 insertions and store x to p+4.
  %x.0 = phi <2 x float> [ %vecins3, %if.then ], [ %vecins5, %if.else ]
  %add.ptr = getelementptr inbounds float* %p, i32 4
  %4 = bitcast float* %add.ptr to i8*
  tail call void @llvm.arm.neon.vst1.v2f32(i8* %4, <2 x float> %x.0, i32 4)
  ret void
}
| 115 | |
; NEON single-vector store intrinsic used by @f3: (pointer, data, alignment).
declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32) nounwind