// RUN: %clang_cc1 %s -O3 -triple=x86_64-apple-darwin -target-feature +fma4 -emit-llvm -o - | FileCheck %s

// Don't include mm_malloc.h, it's system specific.
#define __MM_MALLOC_H

#include <x86intrin.h>
| 7 | |
// Verify the packed single FMA4 multiply-add lowers to its intrinsic.
__m128 test_mm_macc_ps(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfmadd.ps
  __m128 fused = _mm_macc_ps(a, b, c);
  return fused;
}
| 12 | |
// Verify the packed double FMA4 multiply-add lowers to its intrinsic.
__m128d test_mm_macc_pd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfmadd.pd
  __m128d fused = _mm_macc_pd(a, b, c);
  return fused;
}
| 17 | |
// Verify the scalar single FMA4 multiply-add lowers to its intrinsic.
__m128 test_mm_macc_ss(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfmadd.ss
  __m128 fused = _mm_macc_ss(a, b, c);
  return fused;
}
| 22 | |
// Verify the scalar double FMA4 multiply-add lowers to its intrinsic.
__m128d test_mm_macc_sd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfmadd.sd
  __m128d fused = _mm_macc_sd(a, b, c);
  return fused;
}
| 27 | |
// Verify the packed single FMA4 multiply-subtract lowers to its intrinsic.
__m128 test_mm_msub_ps(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfmsub.ps
  __m128 fused = _mm_msub_ps(a, b, c);
  return fused;
}
| 32 | |
// Verify the packed double FMA4 multiply-subtract lowers to its intrinsic.
__m128d test_mm_msub_pd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfmsub.pd
  __m128d fused = _mm_msub_pd(a, b, c);
  return fused;
}
| 37 | |
// Verify the scalar single FMA4 multiply-subtract lowers to its intrinsic.
__m128 test_mm_msub_ss(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfmsub.ss
  __m128 fused = _mm_msub_ss(a, b, c);
  return fused;
}
| 42 | |
// Verify the scalar double FMA4 multiply-subtract lowers to its intrinsic.
__m128d test_mm_msub_sd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfmsub.sd
  __m128d fused = _mm_msub_sd(a, b, c);
  return fused;
}
| 47 | |
// Verify the packed single FMA4 negated multiply-add lowers to its intrinsic.
__m128 test_mm_nmacc_ps(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfnmadd.ps
  __m128 fused = _mm_nmacc_ps(a, b, c);
  return fused;
}
| 52 | |
// Verify the packed double FMA4 negated multiply-add lowers to its intrinsic.
__m128d test_mm_nmacc_pd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfnmadd.pd
  __m128d fused = _mm_nmacc_pd(a, b, c);
  return fused;
}
| 57 | |
// Verify the scalar single FMA4 negated multiply-add lowers to its intrinsic.
__m128 test_mm_nmacc_ss(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfnmadd.ss
  __m128 fused = _mm_nmacc_ss(a, b, c);
  return fused;
}
| 62 | |
// Verify the scalar double FMA4 negated multiply-add lowers to its intrinsic.
__m128d test_mm_nmacc_sd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfnmadd.sd
  __m128d fused = _mm_nmacc_sd(a, b, c);
  return fused;
}
| 67 | |
// Verify the packed single FMA4 negated multiply-subtract lowers to its intrinsic.
__m128 test_mm_nmsub_ps(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfnmsub.ps
  __m128 fused = _mm_nmsub_ps(a, b, c);
  return fused;
}
| 72 | |
// Verify the packed double FMA4 negated multiply-subtract lowers to its intrinsic.
__m128d test_mm_nmsub_pd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfnmsub.pd
  __m128d fused = _mm_nmsub_pd(a, b, c);
  return fused;
}
| 77 | |
// Verify the scalar single FMA4 negated multiply-subtract lowers to its intrinsic.
__m128 test_mm_nmsub_ss(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfnmsub.ss
  __m128 fused = _mm_nmsub_ss(a, b, c);
  return fused;
}
| 82 | |
// Verify the scalar double FMA4 negated multiply-subtract lowers to its intrinsic.
__m128d test_mm_nmsub_sd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfnmsub.sd
  __m128d fused = _mm_nmsub_sd(a, b, c);
  return fused;
}
| 87 | |
// Verify the packed single FMA4 alternating add/subtract lowers to its intrinsic.
__m128 test_mm_maddsub_ps(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfmaddsub.ps
  __m128 fused = _mm_maddsub_ps(a, b, c);
  return fused;
}
| 92 | |
// Verify the packed double FMA4 alternating add/subtract lowers to its intrinsic.
__m128d test_mm_maddsub_pd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfmaddsub.pd
  __m128d fused = _mm_maddsub_pd(a, b, c);
  return fused;
}
| 97 | |
// Verify the packed single FMA4 alternating subtract/add lowers to its intrinsic.
__m128 test_mm_msubadd_ps(__m128 a, __m128 b, __m128 c) {
  // CHECK: @llvm.x86.fma4.vfmsubadd.ps
  __m128 fused = _mm_msubadd_ps(a, b, c);
  return fused;
}
| 102 | |
// Verify the packed double FMA4 alternating subtract/add lowers to its intrinsic.
__m128d test_mm_msubadd_pd(__m128d a, __m128d b, __m128d c) {
  // CHECK: @llvm.x86.fma4.vfmsubadd.pd
  __m128d fused = _mm_msubadd_pd(a, b, c);
  return fused;
}
| 107 | |
// Verify the 256-bit packed single FMA4 multiply-add lowers to its intrinsic.
__m256 test_mm256_macc_ps(__m256 a, __m256 b, __m256 c) {
  // CHECK: @llvm.x86.fma4.vfmadd.ps.256
  __m256 fused = _mm256_macc_ps(a, b, c);
  return fused;
}
| 112 | |
// Verify the 256-bit packed double FMA4 multiply-add lowers to its intrinsic.
__m256d test_mm256_macc_pd(__m256d a, __m256d b, __m256d c) {
  // CHECK: @llvm.x86.fma4.vfmadd.pd.256
  __m256d fused = _mm256_macc_pd(a, b, c);
  return fused;
}
| 117 | |
// Verify the 256-bit packed single FMA4 multiply-subtract lowers to its intrinsic.
__m256 test_mm256_msub_ps(__m256 a, __m256 b, __m256 c) {
  // CHECK: @llvm.x86.fma4.vfmsub.ps.256
  __m256 fused = _mm256_msub_ps(a, b, c);
  return fused;
}
| 122 | |
// Verify the 256-bit packed double FMA4 multiply-subtract lowers to its intrinsic.
__m256d test_mm256_msub_pd(__m256d a, __m256d b, __m256d c) {
  // CHECK: @llvm.x86.fma4.vfmsub.pd.256
  __m256d fused = _mm256_msub_pd(a, b, c);
  return fused;
}
| 127 | |
// Verify the 256-bit packed single FMA4 negated multiply-add lowers to its intrinsic.
__m256 test_mm256_nmacc_ps(__m256 a, __m256 b, __m256 c) {
  // CHECK: @llvm.x86.fma4.vfnmadd.ps.256
  __m256 fused = _mm256_nmacc_ps(a, b, c);
  return fused;
}
| 132 | |
// Verify the 256-bit packed double FMA4 negated multiply-add lowers to its intrinsic.
__m256d test_mm256_nmacc_pd(__m256d a, __m256d b, __m256d c) {
  // CHECK: @llvm.x86.fma4.vfnmadd.pd.256
  __m256d fused = _mm256_nmacc_pd(a, b, c);
  return fused;
}
| 137 | |
// Verify the 256-bit packed single FMA4 negated multiply-subtract lowers to its intrinsic.
__m256 test_mm256_nmsub_ps(__m256 a, __m256 b, __m256 c) {
  // CHECK: @llvm.x86.fma4.vfnmsub.ps.256
  __m256 fused = _mm256_nmsub_ps(a, b, c);
  return fused;
}
| 142 | |
// Verify the 256-bit packed double FMA4 negated multiply-subtract lowers to its intrinsic.
__m256d test_mm256_nmsub_pd(__m256d a, __m256d b, __m256d c) {
  // CHECK: @llvm.x86.fma4.vfnmsub.pd.256
  __m256d fused = _mm256_nmsub_pd(a, b, c);
  return fused;
}
| 147 | |
// Verify the 256-bit packed single FMA4 alternating add/subtract lowers to its intrinsic.
__m256 test_mm256_maddsub_ps(__m256 a, __m256 b, __m256 c) {
  // CHECK: @llvm.x86.fma4.vfmaddsub.ps.256
  __m256 fused = _mm256_maddsub_ps(a, b, c);
  return fused;
}
| 152 | |
// Verify the 256-bit packed double FMA4 alternating add/subtract lowers to its intrinsic.
__m256d test_mm256_maddsub_pd(__m256d a, __m256d b, __m256d c) {
  // CHECK: @llvm.x86.fma4.vfmaddsub.pd.256
  __m256d fused = _mm256_maddsub_pd(a, b, c);
  return fused;
}
| 157 | |
// Verify the 256-bit packed single FMA4 alternating subtract/add lowers to its intrinsic.
__m256 test_mm256_msubadd_ps(__m256 a, __m256 b, __m256 c) {
  // CHECK: @llvm.x86.fma4.vfmsubadd.ps.256
  __m256 fused = _mm256_msubadd_ps(a, b, c);
  return fused;
}
| 162 | |
// Verify the 256-bit packed double FMA4 alternating subtract/add lowers to its intrinsic.
__m256d test_mm256_msubadd_pd(__m256d a, __m256d b, __m256d c) {
  // CHECK: @llvm.x86.fma4.vfmsubadd.pd.256
  __m256d fused = _mm256_msubadd_pd(a, b, c);
  return fused;
}