; RUN: llc -O3 -mtriple=x86_64-pc-linux -stop-after=finalize-isel < %s | FileCheck %s

define <1 x float> @constrained_vector_fadd_v1f32() #0 {
; CHECK-LABEL: name: constrained_vector_fadd_v1f32
; CHECK: [[MOVSSrm_alt:%[0-9]+]]:fr32 = MOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load 4 from constant-pool)
; CHECK: [[ADDSSrm:%[0-9]+]]:fr32 = ADDSSrm [[MOVSSrm_alt]], $rip, 1, $noreg, %const.1, $noreg, implicit $mxcsr :: (load 4 from constant-pool)
; CHECK: $xmm0 = COPY [[ADDSSrm]]
; CHECK: RET 0, $xmm0
entry:
  %add = call <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float> <float 0x7FF0000000000000>, <1 x float> <float 1.0>, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret <1 x float> %add
}

define <3 x float> @constrained_vector_fadd_v3f32() #0 {
; CHECK-LABEL: name: constrained_vector_fadd_v3f32
; CHECK: [[FsFLD0SS:%[0-9]+]]:fr32 = FsFLD0SS
; CHECK: [[MOVSSrm_alt:%[0-9]+]]:fr32 = MOVSSrm_alt $rip, 1, $noreg, %const.0, $noreg :: (load 4 from constant-pool)
; CHECK: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[MOVSSrm_alt]], killed [[FsFLD0SS]], implicit $mxcsr
; CHECK: [[ADDSSrm:%[0-9]+]]:fr32 = ADDSSrm [[MOVSSrm_alt]], $rip, 1, $noreg, %const.1, $noreg, implicit $mxcsr :: (load 4 from constant-pool)
; CHECK: [[ADDSSrm1:%[0-9]+]]:fr32 = ADDSSrm [[MOVSSrm_alt]], $rip, 1, $noreg, %const.2, $noreg, implicit $mxcsr :: (load 4 from constant-pool)
; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY [[ADDSSrm1]]
; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY [[ADDSSrm]]
; CHECK: [[UNPCKLPSrr:%[0-9]+]]:vr128 = UNPCKLPSrr [[COPY1]], killed [[COPY]]
; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY [[ADDSSrr]]
; CHECK: [[UNPCKLPDrr:%[0-9]+]]:vr128 = UNPCKLPDrr [[UNPCKLPSrr]], killed [[COPY2]]
; CHECK: $xmm0 = COPY [[UNPCKLPDrr]]
; CHECK: RET 0, $xmm0
entry:
  %add = call <3 x float> @llvm.experimental.constrained.fadd.v3f32(
           <3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
                        float 0xFFFFFFFFE0000000>,
           <3 x float> <float 2.0, float 1.0, float 0.0>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x float> %add
}

define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; CHECK-LABEL: name: constrained_vector_fadd_v4f64
; CHECK: [[MOVAPDrm:%[0-9]+]]:vr128 = MOVAPDrm $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool)
; CHECK: [[ADDPDrm:%[0-9]+]]:vr128 = ADDPDrm [[MOVAPDrm]], $rip, 1, $noreg, %const.1, $noreg, implicit $mxcsr :: (load 16 from constant-pool)
; CHECK: [[ADDPDrm1:%[0-9]+]]:vr128 = ADDPDrm [[MOVAPDrm]], $rip, 1, $noreg, %const.2, $noreg, implicit $mxcsr :: (load 16 from constant-pool)
; CHECK: $xmm0 = COPY [[ADDPDrm1]]
; CHECK: $xmm1 = COPY [[ADDPDrm]]
; CHECK: RET 0, $xmm0, $xmm1
entry:
  %add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
           <4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
                         double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
           <4 x double> <double 1.000000e+00, double 1.000000e-01,
                         double 2.000000e+00, double 2.000000e-01>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x double> %add
}

declare <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)