; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 --show-mc-encoding | FileCheck %s --check-prefixes CHECK,SIMD128
; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -wasm-enable-unimplemented-simd -mattr=+simd128 -fast-isel --show-mc-encoding | FileCheck %s --check-prefixes CHECK,SIMD128
; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 --show-mc-encoding | FileCheck %s --check-prefixes CHECK,SIMD128-VM
; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 -fast-isel --show-mc-encoding | FileCheck %s --check-prefixes CHECK,SIMD128-VM
; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=-simd128 --show-mc-encoding | FileCheck %s --check-prefixes CHECK,NO-SIMD128
; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=-simd128 -fast-isel --show-mc-encoding | FileCheck %s --check-prefixes CHECK,NO-SIMD128

; Test that basic SIMD128 arithmetic operations assemble as expected.
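; The SIMD128 prefix covers runs with -wasm-enable-unimplemented-simd, the
; SIMD128-VM prefix covers runs without it, and the NO-SIMD128 prefix covers
; runs with SIMD disabled (-mattr=-simd128). Each configuration is run with
; both the default ISel and -fast-isel, and --show-mc-encoding is passed so
; the expected SIMD opcode encodings can be checked.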

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

; ==============================================================================
; 16 x i8
; ==============================================================================
; CHECK-LABEL: add_v16i8
; NO-SIMD128-NOT: i8x16
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i8x16.add $push0=, $0, $1 # encoding: [0xfd,0x18]{{$}}
; SIMD128: return $pop0 #
define <16 x i8> @add_v16i8(<16 x i8> %x, <16 x i8> %y) {
  %a = add <16 x i8> %x, %y
  ret <16 x i8> %a
}

; CHECK-LABEL: sub_v16i8
; NO-SIMD128-NOT: i8x16
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i8x16.sub $push0=, $0, $1 # encoding: [0xfd,0x1c]{{$}}
; SIMD128: return $pop0 #
define <16 x i8> @sub_v16i8(<16 x i8> %x, <16 x i8> %y) {
  %a = sub <16 x i8> %x, %y
  ret <16 x i8> %a
}

; CHECK-LABEL: mul_v16i8
; NO-SIMD128-NOT: i8x16
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i8x16.mul $push0=, $0, $1 # encoding: [0xfd,0x20]{{$}}
; SIMD128: return $pop0 #
define <16 x i8> @mul_v16i8(<16 x i8> %x, <16 x i8> %y) {
  %a = mul <16 x i8> %x, %y
  ret <16 x i8> %a
}

; CHECK-LABEL: and_v16i8
; NO-SIMD128-NOT: i8x16
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.and $push0=, $0, $1 # encoding: [0xfd,0x3b]{{$}}
; SIMD128: return $pop0 #
define <16 x i8> @and_v16i8(<16 x i8> %x, <16 x i8> %y) {
  %a = and <16 x i8> %x, %y
  ret <16 x i8> %a
}

; CHECK-LABEL: or_v16i8
; NO-SIMD128-NOT: i8x16
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.or $push0=, $0, $1 # encoding: [0xfd,0x3c]{{$}}
; SIMD128: return $pop0 #
define <16 x i8> @or_v16i8(<16 x i8> %x, <16 x i8> %y) {
  %a = or <16 x i8> %x, %y
  ret <16 x i8> %a
}

; CHECK-LABEL: xor_v16i8
; NO-SIMD128-NOT: i8x16
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.xor $push0=, $0, $1 # encoding: [0xfd,0x3d]{{$}}
; SIMD128: return $pop0 #
define <16 x i8> @xor_v16i8(<16 x i8> %x, <16 x i8> %y) {
  %a = xor <16 x i8> %x, %y
  ret <16 x i8> %a
}

; CHECK-LABEL: not_v16i8
; NO-SIMD128-NOT: v128
; SIMD128: .param v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.not $push0=, $0 # encoding: [0xfd,0x3e]{{$}}
; SIMD128: return $pop0 #
define <16 x i8> @not_v16i8(<16 x i8> %x) {
  %a = xor <16 x i8> %x, <i8 -1, i8 -1, i8 -1, i8 -1,
                          i8 -1, i8 -1, i8 -1, i8 -1,
                          i8 -1, i8 -1, i8 -1, i8 -1,
                          i8 -1, i8 -1, i8 -1, i8 -1>
  ret <16 x i8> %a
}

; ==============================================================================
; 8 x i16
; ==============================================================================
; CHECK-LABEL: add_v8i16
; NO-SIMD128-NOT: i16x8
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i16x8.add $push0=, $0, $1 # encoding: [0xfd,0x19]{{$}}
; SIMD128: return $pop0 #
define <8 x i16> @add_v8i16(<8 x i16> %x, <8 x i16> %y) {
  %a = add <8 x i16> %x, %y
  ret <8 x i16> %a
}

; CHECK-LABEL: sub_v8i16
; NO-SIMD128-NOT: i16x8
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i16x8.sub $push0=, $0, $1 # encoding: [0xfd,0x1d]{{$}}
; SIMD128: return $pop0 #
define <8 x i16> @sub_v8i16(<8 x i16> %x, <8 x i16> %y) {
  %a = sub <8 x i16> %x, %y
  ret <8 x i16> %a
}

; CHECK-LABEL: mul_v8i16
; NO-SIMD128-NOT: i16x8
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i16x8.mul $push0=, $0, $1 # encoding: [0xfd,0x21]{{$}}
; SIMD128: return $pop0 #
define <8 x i16> @mul_v8i16(<8 x i16> %x, <8 x i16> %y) {
  %a = mul <8 x i16> %x, %y
  ret <8 x i16> %a
}

; CHECK-LABEL: and_v8i16
; NO-SIMD128-NOT: i16x8
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.and $push0=, $0, $1 # encoding: [0xfd,0x3b]{{$}}
; SIMD128: return $pop0 #
define <8 x i16> @and_v8i16(<8 x i16> %x, <8 x i16> %y) {
  %a = and <8 x i16> %x, %y
  ret <8 x i16> %a
}

; CHECK-LABEL: or_v8i16
; NO-SIMD128-NOT: i16x8
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.or $push0=, $0, $1 # encoding: [0xfd,0x3c]{{$}}
; SIMD128: return $pop0 #
define <8 x i16> @or_v8i16(<8 x i16> %x, <8 x i16> %y) {
  %a = or <8 x i16> %x, %y
  ret <8 x i16> %a
}

; CHECK-LABEL: xor_v8i16
; NO-SIMD128-NOT: i16x8
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.xor $push0=, $0, $1 # encoding: [0xfd,0x3d]{{$}}
; SIMD128: return $pop0 #
define <8 x i16> @xor_v8i16(<8 x i16> %x, <8 x i16> %y) {
  %a = xor <8 x i16> %x, %y
  ret <8 x i16> %a
}

; CHECK-LABEL: not_v8i16
; NO-SIMD128-NOT: v128
; SIMD128: .param v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.not $push0=, $0 # encoding: [0xfd,0x3e]{{$}}
; SIMD128: return $pop0 #
define <8 x i16> @not_v8i16(<8 x i16> %x) {
  %a = xor <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1,
                          i16 -1, i16 -1, i16 -1, i16 -1>
  ret <8 x i16> %a
}

; ==============================================================================
; 4 x i32
; ==============================================================================
; CHECK-LABEL: add_v4i32
; NO-SIMD128-NOT: i32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i32x4.add $push0=, $0, $1 # encoding: [0xfd,0x1a]{{$}}
; SIMD128: return $pop0 #
define <4 x i32> @add_v4i32(<4 x i32> %x, <4 x i32> %y) {
  %a = add <4 x i32> %x, %y
  ret <4 x i32> %a
}

; CHECK-LABEL: sub_v4i32
; NO-SIMD128-NOT: i32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i32x4.sub $push0=, $0, $1 # encoding: [0xfd,0x1e]{{$}}
; SIMD128: return $pop0 #
define <4 x i32> @sub_v4i32(<4 x i32> %x, <4 x i32> %y) {
  %a = sub <4 x i32> %x, %y
  ret <4 x i32> %a
}

; CHECK-LABEL: mul_v4i32
; NO-SIMD128-NOT: i32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i32x4.mul $push0=, $0, $1 # encoding: [0xfd,0x22]{{$}}
; SIMD128: return $pop0 #
define <4 x i32> @mul_v4i32(<4 x i32> %x, <4 x i32> %y) {
  %a = mul <4 x i32> %x, %y
  ret <4 x i32> %a
}

; CHECK-LABEL: and_v4i32
; NO-SIMD128-NOT: i32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.and $push0=, $0, $1 # encoding: [0xfd,0x3b]{{$}}
; SIMD128: return $pop0 #
define <4 x i32> @and_v4i32(<4 x i32> %x, <4 x i32> %y) {
  %a = and <4 x i32> %x, %y
  ret <4 x i32> %a
}

; CHECK-LABEL: or_v4i32
; NO-SIMD128-NOT: i32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.or $push0=, $0, $1 # encoding: [0xfd,0x3c]{{$}}
; SIMD128: return $pop0 #
define <4 x i32> @or_v4i32(<4 x i32> %x, <4 x i32> %y) {
  %a = or <4 x i32> %x, %y
  ret <4 x i32> %a
}

; CHECK-LABEL: xor_v4i32
; NO-SIMD128-NOT: i32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.xor $push0=, $0, $1 # encoding: [0xfd,0x3d]{{$}}
; SIMD128: return $pop0 #
define <4 x i32> @xor_v4i32(<4 x i32> %x, <4 x i32> %y) {
  %a = xor <4 x i32> %x, %y
  ret <4 x i32> %a
}

; CHECK-LABEL: not_v4i32
; NO-SIMD128-NOT: v128
; SIMD128: .param v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.not $push0=, $0 # encoding: [0xfd,0x3e]{{$}}
; SIMD128: return $pop0 #
define <4 x i32> @not_v4i32(<4 x i32> %x) {
  %a = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %a
}

; ==============================================================================
; 2 x i64
; ==============================================================================
; CHECK-LABEL: add_v2i64
; NO-SIMD128-NOT: i64x2
; SIMD128-VM-NOT: i64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i64x2.add $push0=, $0, $1 # encoding: [0xfd,0x1b]{{$}}
; SIMD128: return $pop0 #
define <2 x i64> @add_v2i64(<2 x i64> %x, <2 x i64> %y) {
  %a = add <2 x i64> %x, %y
  ret <2 x i64> %a
}

; CHECK-LABEL: sub_v2i64
; NO-SIMD128-NOT: i64x2
; SIMD128-VM-NOT: i64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: i64x2.sub $push0=, $0, $1 # encoding: [0xfd,0x1f]{{$}}
; SIMD128: return $pop0 #
define <2 x i64> @sub_v2i64(<2 x i64> %x, <2 x i64> %y) {
  %a = sub <2 x i64> %x, %y
  ret <2 x i64> %a
}

; v2i64.mul is not in the SIMD spec, so it is scalarized using
; i64x2.extract_lane and i64.mul.
; CHECK-LABEL: mul_v2i64
; NO-SIMD128-NOT: i64x2
; SIMD128-VM-NOT: i64x2
; SIMD128-NOT: i64x2.mul
; SIMD128: i64x2.extract_lane
; SIMD128: i64.mul
define <2 x i64> @mul_v2i64(<2 x i64> %x, <2 x i64> %y) {
  %a = mul <2 x i64> %x, %y
  ret <2 x i64> %a
}

; CHECK-LABEL: and_v2i64
; NO-SIMD128-NOT: i64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.and $push0=, $0, $1 # encoding: [0xfd,0x3b]{{$}}
; SIMD128: return $pop0 #
define <2 x i64> @and_v2i64(<2 x i64> %x, <2 x i64> %y) {
  %a = and <2 x i64> %x, %y
  ret <2 x i64> %a
}

; CHECK-LABEL: or_v2i64
; NO-SIMD128-NOT: i64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.or $push0=, $0, $1 # encoding: [0xfd,0x3c]{{$}}
; SIMD128: return $pop0 #
define <2 x i64> @or_v2i64(<2 x i64> %x, <2 x i64> %y) {
  %a = or <2 x i64> %x, %y
  ret <2 x i64> %a
}

; CHECK-LABEL: xor_v2i64
; NO-SIMD128-NOT: i64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.xor $push0=, $0, $1 # encoding: [0xfd,0x3d]{{$}}
; SIMD128: return $pop0 #
define <2 x i64> @xor_v2i64(<2 x i64> %x, <2 x i64> %y) {
  %a = xor <2 x i64> %x, %y
  ret <2 x i64> %a
}

; CHECK-LABEL: not_v2i64
; NO-SIMD128-NOT: v128
; SIMD128-VM-NOT: v128
; SIMD128: .param v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: v128.not $push0=, $0 # encoding: [0xfd,0x3e]{{$}}
; SIMD128: return $pop0 #
define <2 x i64> @not_v2i64(<2 x i64> %x) {
  %a = xor <2 x i64> %x, <i64 -1, i64 -1>
  ret <2 x i64> %a
}

; ==============================================================================
; 4 x float
; ==============================================================================
; CHECK-LABEL: add_v4f32
; NO-SIMD128-NOT: f32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f32x4.add $push0=, $0, $1 # encoding: [0xfd,0x7a]{{$}}
; SIMD128: return $pop0 #
define <4 x float> @add_v4f32(<4 x float> %x, <4 x float> %y) {
  %a = fadd <4 x float> %x, %y
  ret <4 x float> %a
}

; CHECK-LABEL: sub_v4f32
; NO-SIMD128-NOT: f32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f32x4.sub $push0=, $0, $1 # encoding: [0xfd,0x7c]{{$}}
; SIMD128: return $pop0 #
define <4 x float> @sub_v4f32(<4 x float> %x, <4 x float> %y) {
  %a = fsub <4 x float> %x, %y
  ret <4 x float> %a
}

; CHECK-LABEL: div_v4f32
; NO-SIMD128-NOT: f32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f32x4.div $push0=, $0, $1 # encoding: [0xfd,0x7e]{{$}}
; SIMD128: return $pop0 #
define <4 x float> @div_v4f32(<4 x float> %x, <4 x float> %y) {
  %a = fdiv <4 x float> %x, %y
  ret <4 x float> %a
}

; CHECK-LABEL: mul_v4f32
; NO-SIMD128-NOT: f32x4
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f32x4.mul $push0=, $0, $1 # encoding: [0xfd,0x80]{{$}}
; SIMD128: return $pop0 #
define <4 x float> @mul_v4f32(<4 x float> %x, <4 x float> %y) {
  %a = fmul <4 x float> %x, %y
  ret <4 x float> %a
}

; ==============================================================================
; 2 x double
; ==============================================================================
; CHECK-LABEL: add_v2f64
; NO-SIMD128-NOT: f64x2
; SIMD128-VM-NOT: f64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f64x2.add $push0=, $0, $1 # encoding: [0xfd,0x7b]{{$}}
; SIMD128: return $pop0 #
define <2 x double> @add_v2f64(<2 x double> %x, <2 x double> %y) {
  %a = fadd <2 x double> %x, %y
  ret <2 x double> %a
}

; CHECK-LABEL: sub_v2f64
; NO-SIMD128-NOT: f64x2
; SIMD128-VM-NOT: f64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f64x2.sub $push0=, $0, $1 # encoding: [0xfd,0x7d]{{$}}
; SIMD128: return $pop0 #
define <2 x double> @sub_v2f64(<2 x double> %x, <2 x double> %y) {
  %a = fsub <2 x double> %x, %y
  ret <2 x double> %a
}

; CHECK-LABEL: div_v2f64
; NO-SIMD128-NOT: f64x2
; SIMD128-VM-NOT: f64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f64x2.div $push0=, $0, $1 # encoding: [0xfd,0x7f]{{$}}
; SIMD128: return $pop0 #
define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) {
  %a = fdiv <2 x double> %x, %y
  ret <2 x double> %a
}

; CHECK-LABEL: mul_v2f64
; NO-SIMD128-NOT: f64x2
; SIMD128-VM-NOT: f64x2
; SIMD128: .param v128, v128{{$}}
; SIMD128: .result v128{{$}}
; SIMD128: f64x2.mul $push0=, $0, $1 # encoding: [0xfd,0x81]{{$}}
; SIMD128: return $pop0 #
define <2 x double> @mul_v2f64(<2 x double> %x, <2 x double> %y) {
  %a = fmul <2 x double> %x, %y
  ret <2 x double> %a
}