; RUN: llc < %s -march x86-64 -mtriple x86_64-unknown-linux-gnu -mattr +avx | FileCheck %s
; RUN: llc < %s -march x86-64 -mtriple x86_64-unknown-linux-gnu -mattr +avx512f | FileCheck %s

; The 'v' constraint must pick XMM registers for 128-bit operands; the i64
; input and the vector input land in %xmm1/%xmm0 respectively.
define <4 x float> @testXMM_1(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmovhlps %xmm1, %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "vmovhlps $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
  ret <4 x float> %res
}
| 10 | |
; Single 'v' input with a 'v' output: both should be allocated to %xmm0.
define <4 x float> @testXMM_2(<4 x float> %_xmm0, i64 %_l) {
; CHECK: movapd %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "movapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
  ret <4 x float> %res
}
| 17 | |
; Same as testXMM_2 but with the VEX-encoded move; still expects %xmm0.
define <4 x float> @testXMM_3(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmovapd %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "vmovapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
  ret <4 x float> %res
}
| 24 | |
; Three-operand form with an immediate ($$0 emits a literal $0 in AT&T syntax).
define <4 x float> @testXMM_4(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmpsadbw $0, %xmm1, %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "vmpsadbw $$0, $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
  ret <4 x float> %res
}
| 31 | |
; Both inputs are the same i64 value, so the allocator may (and per the
; CHECK line does) place every operand in %xmm0.
define <4 x float> @testXMM_5(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vminpd %xmm0, %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, i64 %_l)
  ret <4 x float> %res
}
| 38 | |
; Side-effecting asm with an input-only 'v' operand: the i64 argument is
; expected in %xmm0; the template's %eax is emitted verbatim.
define i64 @testXMM_6(i64 returned %_l) {
; CHECK: vmovd %xmm0, %eax
entry:
  tail call void asm sideeffect "vmovd $0, %eax", "v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
  ret i64 %_l
}
| 45 | |
; Vector argument bound to 'v' lands in %xmm0; vmovmskps extracts its sign
; mask into a GPR. The destination in the template is %eax (the standard
; 32-bit MOVMSKPS destination), matching the CHECK line and testXMM_6.
; The previous template wrote %rax: inline-asm templates are emitted
; verbatim, so the CHECK expectation of %eax could never match.
define <4 x float> @testXMM_7(<4 x float> returned %_xmm0) {
; CHECK: vmovmskps %xmm0, %eax
entry:
  tail call void asm sideeffect "vmovmskps $0, %eax", "v,~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0)
  ret <4 x float> %_xmm0
}
| 52 | |
; Mixed i64/vector inputs and an i64 result, all constrained to 'v':
; everything should be register-allocated to XMM registers.
define i64 @testXMM_8(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmulsd %xmm1, %xmm0, %xmm0
entry:
  %res = tail call i64 asm "vmulsd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
  ret i64 %res
}
| 59 | |
; Three-operand VEX logical op; i64 and vector inputs both map to XMM regs.
define <4 x float> @testXMM_9(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vorpd %xmm1, %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
  ret <4 x float> %res
}
| 66 | |
; Legacy-SSE mnemonic with the 'v' constraint; input and output share %xmm0.
define <4 x float> @testXMM_10(<4 x float> %_xmm0, i64 %_l) {
; CHECK: pabsb %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "pabsb $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
  ret <4 x float> %res
}
| 73 | |
; VEX counterpart of testXMM_10; input and output share %xmm0.
define <4 x float> @testXMM_11(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vpabsd %xmm0, %xmm0
entry:
  %res = tail call <4 x float> asm "vpabsd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
  ret <4 x float> %res
}
| 80 | |
; The 'v' constraint must select a YMM register for 256-bit operands.
define <8 x float> @testYMM_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovsldup %ymm0, %ymm0
entry:
  %res = tail call <8 x float> asm "vmovsldup $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm0)
  ret <8 x float> %res
}
| 87 | |
; Second vector argument (%ymm1) moved into the YMM output (%ymm0).
define <8 x float> @testYMM_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovapd %ymm1, %ymm0
entry:
  %res = tail call <8 x float> asm "vmovapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
  ret <8 x float> %res
}
| 94 | |
; Three-operand YMM form; both vector inputs bound via 'v'.
define <8 x float> @testYMM_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vminpd %ymm1, %ymm0, %ymm0
entry:
  %res = tail call <8 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
  ret <8 x float> %res
}
| 101 | |
; YMM logical op with both inputs constrained to 'v'.
define <8 x float> @testYMM_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vorpd %ymm1, %ymm0, %ymm0
entry:
  %res = tail call <8 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
  ret <8 x float> %res
}
| 108 | |
; YMM packed-single multiply. NOTE(review): name breaks the testYMM_N
; pattern (presumably meant testYMM_5), kept as-is to preserve the symbol.
define <8 x float> @testYMM(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmulps %ymm1, %ymm0, %ymm0
entry:
  %res = tail call <8 x float> asm "vmulps $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
  ret <8 x float> %res
}
| 115 | |
; YMM packed-double multiply; both inputs via 'v'.
define <8 x float> @testYMM_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmulpd %ymm1, %ymm0, %ymm0
entry:
  %res = tail call <8 x float> asm "vmulpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
  ret <8 x float> %res
}
| 122 | |
; Unaligned packed-single move between YMM registers.
define <8 x float> @testYMM_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovups %ymm1, %ymm0
entry:
  %res = tail call <8 x float> asm "vmovups $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
  ret <8 x float> %res
}
| 129 | |
; Unaligned packed-double move between YMM registers.
define <8 x float> @testYMM_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovupd %ymm1, %ymm0
entry:
  %res = tail call <8 x float> asm "vmovupd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
  ret <8 x float> %res
}
| 136 | |