; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX

; Although we can fold an unaligned load with AVX, and under special
; conditions with some SSE implementations, we cannot fold the loads in
; these test cases under any circumstances because they are not 16-byte
; loads. The load must be executed as a scalar ('movs*') with a zero
; extension to 128 bits and then used in the packed logical ('andp*') op.
; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371

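; In both tests below, the compare mask selects between 0.0 and 1.0, and the
; 1.0 is materialized with an 8-byte (double) or 4-byte (float) constant-pool
; load, so the 16-byte requirement can never be met. (A foldable 16-byte
; case is sketched, commented out, at the end of this file.)
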
define double @load_double_no_fold(double %x, double %y) {
; SSE2-LABEL: load_double_no_fold:
; SSE2: BB#0:
; SSE2-NEXT: cmplesd %xmm0, %xmm1
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: andpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: load_double_no_fold:
; AVX: BB#0:
; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq

  %cmp = fcmp oge double %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to double
  ret double %conv
}

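; The same pattern with float: here the scalar constant load is only 4 bytes.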
define float @load_float_no_fold(float %x, float %y) {
; SSE2-LABEL: load_float_no_fold:
; SSE2: BB#0:
; SSE2-NEXT: cmpless %xmm0, %xmm1
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: andps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: load_float_no_fold:
; AVX: BB#0:
; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq

  %cmp = fcmp oge float %x, %y
  %zext = zext i1 %cmp to i32
  %conv = sitofp i32 %zext to float
  ret float %conv
}
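
; For contrast, a minimal sketch (commented out, not checked by this test,
; and with a hypothetical function name) of a 16-byte unaligned load that
; the backend could fold directly into the logical op with AVX, e.g. as
; 'vandps (%rdi), %xmm0, %xmm0':
;
; define <4 x float> @load_v4f32_fold(<4 x float> %x, <4 x float>* %p) {
;   %v = load <4 x float>* %p, align 1
;   %xi = bitcast <4 x float> %x to <4 x i32>
;   %vi = bitcast <4 x float> %v to <4 x i32>
;   %and = and <4 x i32> %xi, %vi
;   %r = bitcast <4 x i32> %and to <4 x float>
;   ret <4 x float> %r
; }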