; RUN: llc -mtriple=i386-unknown-freebsd -mcpu=core2 -stack-alignment=4 -relocation-model=pic < %s | FileCheck %s -check-prefix=UNALIGNED
; RUN: llc -mtriple=i386-unknown-freebsd -mcpu=core2 -stack-alignment=16 -relocation-model=pic < %s | FileCheck %s -check-prefix=ALIGNED
; RUN: llc -mtriple=i386-unknown-freebsd -mcpu=core2 -stack-alignment=4 -force-align-stack -relocation-model=pic < %s | FileCheck %s -check-prefix=FORCEALIGNED
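; Three configurations: 4-byte stack alignment (UNALIGNED), 16-byte stack
; alignment (ALIGNED), and 4-byte alignment with forced stack realignment
; (FORCEALIGNED).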

@arr = internal unnamed_addr global [32 x i32] zeroinitializer, align 16

; PR12250
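; The chain of vector add/xor constants below causes a <4 x i32> value to be
; spilled; the CHECK lines at the end verify when the 16-byte reload can be
; folded into paddd.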
define i32 @test1() {
vector.ph:
  br label %vector.body

vector.body:
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds [32 x i32]* @arr, i32 0, i32 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>* %1, align 16
  %2 = add nsw <4 x i32> %wide.load, <i32 10, i32 10, i32 10, i32 10>
  %3 = xor <4 x i32> %2, <i32 123345, i32 123345, i32 123345, i32 123345>
  %4 = add nsw <4 x i32> %3, <i32 112, i32 112, i32 112, i32 112>
  %5 = xor <4 x i32> %4, <i32 543345, i32 543345, i32 543345, i32 543345>
  %6 = add nsw <4 x i32> %5, <i32 73, i32 73, i32 73, i32 73>
  %7 = xor <4 x i32> %6, <i32 345987, i32 345987, i32 345987, i32 345987>
  %8 = add nsw <4 x i32> %7, <i32 48, i32 48, i32 48, i32 48>
  %9 = xor <4 x i32> %8, <i32 123987, i32 123987, i32 123987, i32 123987>
  store <4 x i32> %9, <4 x i32>* %1, align 16
  %index.next = add i32 %index, 4
  %10 = icmp eq i32 %index.next, 32
  br i1 %10, label %middle.block, label %vector.body

middle.block:
  ret i32 0

; We can't fold the spill into a paddd unless the stack is aligned. Just
; spilling doesn't force stack realignment, though.
; UNALIGNED-LABEL: @test1
; UNALIGNED-NOT: andl $-{{..}}, %esp
; UNALIGNED: movdqu {{.*}} # 16-byte Folded Spill
; UNALIGNED-NOT: paddd {{.*}} # 16-byte Folded Reload

; ALIGNED-LABEL: @test1
; ALIGNED-NOT: andl $-{{..}}, %esp
; ALIGNED: movdqa {{.*}} # 16-byte Spill
; ALIGNED: paddd {{.*}} # 16-byte Folded Reload

; FORCEALIGNED-LABEL: @test1
; FORCEALIGNED: andl $-{{..}}, %esp
; FORCEALIGNED: movdqa {{.*}} # 16-byte Spill
; FORCEALIGNED: paddd {{.*}} # 16-byte Folded Reload
}