; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin9 -mattr=sse41 | FileCheck %s -check-prefix=X32
; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin9 -mattr=sse41 | FileCheck %s -check-prefix=X64

@g16 = external global i16

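; Inserting a scalar into lane 1 of a <4 x i32> should select pinsrd
; (from the stack slot on x86-32, from %edi on x86-64).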
define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
  %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
  ret <4 x i32> %tmp1
; X32: pinsrd_1:
; X32: pinsrd $1, 4(%esp), %xmm0

; X64: pinsrd_1:
; X64: pinsrd $1, %edi, %xmm0
}

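; The same pattern with an i8 element should select pinsrb.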
define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
  %tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
  ret <16 x i8> %tmp1
; X32: pinsrb_1:
; X32: pinsrb $1, 4(%esp), %xmm0

; X64: pinsrb_1:
; X64: pinsrb $1, %edi, %xmm0
}

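; The load feeding pmovsxbd should be folded into the instruction's memory
; operand instead of going through a separate move into an xmm register.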
define <2 x i64> @pmovsxbd_1(i32* %p) nounwind {
entry:
  %0 = load i32* %p, align 4
  %1 = insertelement <4 x i32> undef, i32 %0, i32 0
  %2 = insertelement <4 x i32> %1, i32 0, i32 1
  %3 = insertelement <4 x i32> %2, i32 0, i32 2
  %4 = insertelement <4 x i32> %3, i32 0, i32 3
  %5 = bitcast <4 x i32> %4 to <16 x i8>
  %6 = tail call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %5) nounwind readnone
  %7 = bitcast <4 x i32> %6 to <2 x i64>
  ret <2 x i64> %7

; X32: _pmovsxbd_1:
; X32: movl 4(%esp), %eax
; X32: pmovsxbd (%eax), %xmm0

; X64: _pmovsxbd_1:
; X64: pmovsxbd (%rdi), %xmm0
}

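; Likewise for pmovsxwd: the i64 load should be folded into the instruction.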
define <2 x i64> @pmovsxwd_1(i64* %p) nounwind readonly {
entry:
  %0 = load i64* %p                               ; <i64> [#uses=1]
  %tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0 ; <<2 x i64>> [#uses=1]
  %1 = bitcast <2 x i64> %tmp2 to <8 x i16>       ; <<8 x i16>> [#uses=1]
  %2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone ; <<4 x i32>> [#uses=1]
  %3 = bitcast <4 x i32> %2 to <2 x i64>          ; <<2 x i64>> [#uses=1]
  ret <2 x i64> %3

; X32: _pmovsxwd_1:
; X32: movl 4(%esp), %eax
; X32: pmovsxwd (%eax), %xmm0

; X64: _pmovsxwd_1:
; X64: pmovsxwd (%rdi), %xmm0
}

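; pmovzxbq of an i16 loaded from a global: the global's address is
; materialized (non-lazy pointer on x86-32, GOTPCREL on x86-64) and the load
; is folded into pmovzxbq.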
define <2 x i64> @pmovzxbq_1() nounwind {
entry:
  %0 = load i16* @g16, align 2                    ; <i16> [#uses=1]
  %1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
  %2 = bitcast <8 x i16> %1 to <16 x i8>          ; <<16 x i8>> [#uses=1]
  %3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
  ret <2 x i64> %3

; X32: _pmovzxbq_1:
; X32: movl L_g16$non_lazy_ptr, %eax
; X32: pmovzxbq (%eax), %xmm0

; X64: _pmovzxbq_1:
; X64: movq _g16@GOTPCREL(%rip), %rax
; X64: pmovzxbq (%rax), %xmm0
}

declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone