; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX

; Test that we can replace "scalar" FP-bitwise-logic with the optimal instruction.
; Scalar x86 FP-logic instructions only exist in your imagination and/or the bowels
; of compilers, but float and double variants of FP-logic instructions are reality
; and float may be a shorter instruction depending on which flavor of vector ISA
; you have...so just prefer float all the time, ok? Yay, x86!

define double @FsANDPSrr(double %x, double %y) {
; SSE-LABEL: FsANDPSrr:
; SSE:       # BB#0:
; SSE-NEXT:    andps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: FsANDPSrr:
; AVX:       # BB#0:
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %and = and i64 %bc1, %bc2
  %bc3 = bitcast i64 %and to double
  ret double %bc3
}

define double @FsANDNPSrr(double %x, double %y) {
; SSE-LABEL: FsANDNPSrr:
; SSE:       # BB#0:
; SSE-NEXT:    movd %xmm0, %rax
; SSE-NEXT:    movd %xmm1, %rcx
; SSE-NEXT:    notq %rcx
; SSE-NEXT:    andq %rax, %rcx
; SSE-NEXT:    movd %rcx, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: FsANDNPSrr:
; AVX:       # BB#0:
; AVX-NEXT:    vmovq %xmm0, %rax
; AVX-NEXT:    vmovq %xmm1, %rcx
; AVX-NEXT:    notq %rcx
; AVX-NEXT:    andq %rax, %rcx
; AVX-NEXT:    vmovq %rcx, %xmm0
; AVX-NEXT:    retq
;
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %not = xor i64 %bc2, -1
  %and = and i64 %bc1, %not
  %bc3 = bitcast i64 %and to double
  ret double %bc3
}

define double @FsORPSrr(double %x, double %y) {
; SSE-LABEL: FsORPSrr:
; SSE:       # BB#0:
; SSE-NEXT:    orps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: FsORPSrr:
; AVX:       # BB#0:
; AVX-NEXT:    vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %or = or i64 %bc1, %bc2
  %bc3 = bitcast i64 %or to double
  ret double %bc3
}

define double @FsXORPSrr(double %x, double %y) {
; SSE-LABEL: FsXORPSrr:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: FsXORPSrr:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
  %bc1 = bitcast double %x to i64
  %bc2 = bitcast double %y to i64
  %xor = xor i64 %bc1, %bc2
  %bc3 = bitcast i64 %xor to double
  ret double %bc3
}