; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=NONE
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=VFP4-ALL
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=FP-ARMv8
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP4-ALL -check-prefix=VFP4-DP
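; Check prefixes, as selected by the RUN lines above:
;   NONE - Cortex-M3, no FPU: FP arithmetic and conversions are lowered to library calls
;   HARD - any of the hard-float runs (Cortex-M4, Cortex-M7, Cortex-A8)
;   SP   - single-precision-only FPU (Cortex-M4); double operations still use library calls
;   DP   - FPU with double-precision support (Cortex-M7, Cortex-A8)
;   VFP4-ALL/VFP4-DP vs. FP-ARMv8 - FPUs without and with the VSEL instructions
;                                   (used by the select tests at the end of the file)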

define float @add_f(float %a, float %b) {
entry:
; CHECK-LABEL: add_f:
; NONE: bl __aeabi_fadd
; HARD: vadd.f32 s0, s0, s1
  %0 = fadd float %a, %b
  ret float %0
}

define double @add_d(double %a, double %b) {
entry:
; CHECK-LABEL: add_d:
; NONE: bl __aeabi_dadd
; SP: bl __aeabi_dadd
; DP: vadd.f64 d0, d0, d1
  %0 = fadd double %a, %b
  ret double %0
}

define float @sub_f(float %a, float %b) {
entry:
; CHECK-LABEL: sub_f:
; NONE: bl __aeabi_fsub
; HARD: vsub.f32 s
  %0 = fsub float %a, %b
  ret float %0
}

define double @sub_d(double %a, double %b) {
entry:
; CHECK-LABEL: sub_d:
; NONE: bl __aeabi_dsub
; SP: bl __aeabi_dsub
; DP: vsub.f64 d0, d0, d1
  %0 = fsub double %a, %b
  ret double %0
}

define float @mul_f(float %a, float %b) {
entry:
; CHECK-LABEL: mul_f:
; NONE: bl __aeabi_fmul
; HARD: vmul.f32 s
  %0 = fmul float %a, %b
  ret float %0
}

define double @mul_d(double %a, double %b) {
entry:
; CHECK-LABEL: mul_d:
; NONE: bl __aeabi_dmul
; SP: bl __aeabi_dmul
; DP: vmul.f64 d0, d0, d1
  %0 = fmul double %a, %b
  ret double %0
}

define float @div_f(float %a, float %b) {
entry:
; CHECK-LABEL: div_f:
; NONE: bl __aeabi_fdiv
; HARD: vdiv.f32 s
  %0 = fdiv float %a, %b
  ret float %0
}

define double @div_d(double %a, double %b) {
entry:
; CHECK-LABEL: div_d:
; NONE: bl __aeabi_ddiv
; SP: bl __aeabi_ddiv
; DP: vdiv.f64 d0, d0, d1
  %0 = fdiv double %a, %b
  ret double %0
}

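; frem is lowered to a call to the fmodf/fmod library functions on all of these
; configurations; the hard-float runs emit it as a tail call (b rather than bl).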
define float @rem_f(float %a, float %b) {
entry:
; CHECK-LABEL: rem_f:
; NONE: bl fmodf
; HARD: b fmodf
  %0 = frem float %a, %b
  ret float %0
}

define double @rem_d(double %a, double %b) {
entry:
; CHECK-LABEL: rem_d:
; NONE: bl fmod
; HARD: b fmod
  %0 = frem double %a, %b
  ret double %0
}

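; Loads and stores: under soft-float, FP values live in core registers
; (ldr/ldm/str/strd); hard-float uses the FP load/store instructions (vldr/vstr).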
define float @load_f(float* %a) {
entry:
; CHECK-LABEL: load_f:
; NONE: ldr r0, [r0]
; HARD: vldr s0, [r0]
  %0 = load float, float* %a, align 4
  ret float %0
}

define double @load_d(double* %a) {
entry:
; CHECK-LABEL: load_d:
; NONE: ldm r0, {r0, r1}
; HARD: vldr d0, [r0]
  %0 = load double, double* %a, align 8
  ret double %0
}

define void @store_f(float* %a, float %b) {
entry:
; CHECK-LABEL: store_f:
; NONE: str r1, [r0]
; HARD: vstr s0, [r0]
  store float %b, float* %a, align 4
  ret void
}

define void @store_d(double* %a, double %b) {
entry:
; CHECK-LABEL: store_d:
; NONE: strd r2, r3, [r0]
; HARD: vstr d0, [r0]
  store double %b, double* %a, align 8
  ret void
}

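; Conversions between FP types and between FP and integer types. With a
; single-precision-only FPU, anything involving double still goes through library calls.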
define double @f_to_d(float %a) {
; CHECK-LABEL: f_to_d:
; NONE: bl __aeabi_f2d
; SP: bl __aeabi_f2d
; DP: vcvt.f64.f32 d0, s0
  %1 = fpext float %a to double
  ret double %1
}

define float @d_to_f(double %a) {
; CHECK-LABEL: d_to_f:
; NONE: bl __aeabi_d2f
; SP: bl __aeabi_d2f
; DP: vcvt.f32.f64 s0, d0
  %1 = fptrunc double %a to float
  ret float %1
}

define i32 @f_to_si(float %a) {
; CHECK-LABEL: f_to_si:
; NONE: bl __aeabi_f2iz
; HARD: vcvt.s32.f32 s0, s0
; HARD: vmov r0, s0
  %1 = fptosi float %a to i32
  ret i32 %1
}

define i32 @d_to_si(double %a) {
; CHECK-LABEL: d_to_si:
; NONE: bl __aeabi_d2iz
; SP: vmov r0, r1, d0
; SP: bl __aeabi_d2iz
; DP: vcvt.s32.f64 s0, d0
; DP: vmov r0, s0
  %1 = fptosi double %a to i32
  ret i32 %1
}

define i32 @f_to_ui(float %a) {
; CHECK-LABEL: f_to_ui:
; NONE: bl __aeabi_f2uiz
; HARD: vcvt.u32.f32 s0, s0
; HARD: vmov r0, s0
  %1 = fptoui float %a to i32
  ret i32 %1
}

define i32 @d_to_ui(double %a) {
; CHECK-LABEL: d_to_ui:
; NONE: bl __aeabi_d2uiz
; SP: vmov r0, r1, d0
; SP: bl __aeabi_d2uiz
; DP: vcvt.u32.f64 s0, d0
; DP: vmov r0, s0
  %1 = fptoui double %a to i32
  ret i32 %1
}

define float @si_to_f(i32 %a) {
; CHECK-LABEL: si_to_f:
; NONE: bl __aeabi_i2f
; HARD: vcvt.f32.s32 s0, s0
  %1 = sitofp i32 %a to float
  ret float %1
}

define double @si_to_d(i32 %a) {
; CHECK-LABEL: si_to_d:
; NONE: bl __aeabi_i2d
; SP: bl __aeabi_i2d
; DP: vcvt.f64.s32 d0, s0
  %1 = sitofp i32 %a to double
  ret double %1
}

define float @ui_to_f(i32 %a) {
; CHECK-LABEL: ui_to_f:
; NONE: bl __aeabi_ui2f
; HARD: vcvt.f32.u32 s0, s0
  %1 = uitofp i32 %a to float
  ret float %1
}

define double @ui_to_d(i32 %a) {
; CHECK-LABEL: ui_to_d:
; NONE: bl __aeabi_ui2d
; SP: bl __aeabi_ui2d
; DP: vcvt.f64.u32 d0, s0
  %1 = uitofp i32 %a to double
  ret double %1
}

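; Bitcasts between integer and FP types: no register moves are expected under
; soft-float (the value already lives in core registers); hard-float needs a vmov
; between core and FP registers.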
define float @bitcast_i_to_f(i32 %a) {
; CHECK-LABEL: bitcast_i_to_f:
; NONE-NOT: mov
; HARD: vmov s0, r0
  %1 = bitcast i32 %a to float
  ret float %1
}

define double @bitcast_i_to_d(i64 %a) {
; CHECK-LABEL: bitcast_i_to_d:
; NONE-NOT: mov
; HARD: vmov d0, r0, r1
  %1 = bitcast i64 %a to double
  ret double %1
}

define i32 @bitcast_f_to_i(float %a) {
; CHECK-LABEL: bitcast_f_to_i:
; NONE-NOT: mov
; HARD: vmov r0, s0
  %1 = bitcast float %a to i32
  ret i32 %1
}

define i64 @bitcast_d_to_i(double %a) {
; CHECK-LABEL: bitcast_d_to_i:
; NONE-NOT: mov
; HARD: vmov r0, r1, d0
  %1 = bitcast double %a to i64
  ret i64 %1
}

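; Selects: FP-ARMv8 targets use the VSEL instructions; older FPUs use predicated
; VMOVs, and the single-precision-only FPU selects doubles via core registers.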
define float @select_f(float %a, float %b, i1 %c) {
; CHECK-LABEL: select_f:
; NONE: lsls r2, r2, #31
; NONE: moveq r0, r1
; HARD: lsls r0, r0, #31
; VFP4-ALL: vmovne.f32 s1, s0
; VFP4-ALL: vmov.f32 s0, s1
; FP-ARMv8: vseleq.f32 s0, s1, s0
  %1 = select i1 %c, float %a, float %b
  ret float %1
}

define double @select_d(double %a, double %b, i1 %c) {
; CHECK-LABEL: select_d:
; NONE: ldr{{(.w)?}} [[REG:r[0-9]+]], [sp]
; NONE: ands [[REG]], [[REG]], #1
; NONE: moveq r0, r2
; NONE: moveq r1, r3
; SP: ands r0, r0, #1
; SP-DAG: vmov [[ALO:r[0-9]+]], [[AHI:r[0-9]+]], d0
; SP-DAG: vmov [[BLO:r[0-9]+]], [[BHI:r[0-9]+]], d1
; SP: itt ne
; SP-DAG: movne [[BLO]], [[ALO]]
; SP-DAG: movne [[BHI]], [[AHI]]
; SP: vmov d0, [[BLO]], [[BHI]]
; DP: lsls r0, r0, #31
; VFP4-DP: vmovne.f64 d1, d0
; VFP4-DP: vmov.f64 d0, d1
; FP-ARMv8: vseleq.f64 d0, d1, d0
  %1 = select i1 %c, double %a, double %b
  ret double %1
}