; RUN: opt -S -loop-vectorize -instcombine -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; Check that the interleaved-mem-access analysis currently does not create an
; interleave group for access 'a' due to the possible pointer wrap-around.
;
; To begin with, in this test the candidate interleave group can be created
; only when getPtrStride is called with Assume=true. Next, because the
; interleave group of the loads is not full (it has gaps), we also need to
; check for possible pointer wrapping. Here we currently use Assume=false, so
; we cannot prove that the transformation is safe and therefore invalidate the
; candidate interleave group.
;
; FIXME: This is a missed optimization. Once we use Assume=true here, we will
; not have to invalidate the group.

; void func(unsigned * __restrict a, unsigned * __restrict b, unsigned char x, unsigned char y) {
;   int i = 0;
;   for (unsigned char index = x; i < y; index += 2, ++i)
;     b[i] = a[index] * 2;
; }

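; With an i8 induction variable the index can wrap around: for example, with
; x = 250 and y = 10 the index takes the values 250, 252, 254, 0, 2, ..., so
; the accesses to 'a' are not a single stride-2 run across the whole loop
; unless no-wrap can be proven.
;
; The CHECK-NOT lines below verify that the loads from 'a' are not emitted as
; a single wide load plus a stride-2 shufflevector, which is how the
; vectorizer would lower a kept interleave group.
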
; CHECK: vector.body:
; CHECK-NOT: %wide.vec = load <8 x i32>, <8 x i32>* {{.*}}, align 4
; CHECK-NOT: shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>

define void @_Z4funcPjS_hh(i32* noalias nocapture readonly %a, i32* noalias nocapture %b, i8 zeroext %x, i8 zeroext %y) local_unnamed_addr {
entry:
  %cmp9 = icmp eq i8 %y, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:
  %wide.trip.count = zext i8 %y to i64
  br label %for.body

for.cond.cleanup.loopexit:
  br label %for.cond.cleanup

for.cond.cleanup:
  ret void

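; Loop body: load from 'a' at the zero-extended i8 index (which advances by 2
; and may wrap), double the loaded value, and store it to 'b' at the i64
; induction variable.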
for.body:
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %index.011 = phi i8 [ %add, %for.body ], [ %x, %for.body.preheader ]
  %idxprom = zext i8 %index.011 to i64
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %mul = shl i32 %0, 1
  %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  store i32 %mul, i32* %arrayidx2, align 4
  %add = add i8 %index.011, 2
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}