; RUN: llc < %s -o - -march=x86-64 -mattr=+sse42 -disable-mmx | FileCheck %s

; Test that loads and stores of odd-sized vector types are widened correctly (based on pr5626).
;

%i32vec3 = type <3 x i32>
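; The operands are 16-byte aligned, so the checks expect full movaps loads and a
; partial store (a movq of the low two elements plus a pextrd of the third) that
; writes exactly the 12 valid bytes.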
define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; CHECK: movaps
; CHECK: paddd
; CHECK: pextrd
; CHECK: movq
	%a = load %i32vec3* %ap, align 16
	%b = load %i32vec3* %bp, align 16
	%x = add %i32vec3 %a, %b
	store %i32vec3 %x, %i32vec3* %ret, align 16
	ret void
}

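; Same computation without an alignment guarantee: the checks expect the loads
; to be assembled piecewise (movq + pinsrd) rather than with 16-byte movaps.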
define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; CHECK: movq
; CHECK: pinsrd
; CHECK: movq
; CHECK: pinsrd
; CHECK: paddd
; CHECK: pextrd
; CHECK: movq
	%a = load %i32vec3* %ap
	%b = load %i32vec3* %bp
	%x = add %i32vec3 %a, %b
	store %i32vec3 %x, %i32vec3* %ret
	ret void
}

%i32vec7 = type <7 x i32>
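; <7 x i32> splits across two XMM registers: two paddd ops, a full movaps store
; for the low four elements, and a pextrd + movq for the remaining three.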
define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) {
; CHECK: movaps
; CHECK: movaps
; CHECK: paddd
; CHECK: paddd
; CHECK: pextrd
; CHECK: movq
; CHECK: movaps
	%a = load %i32vec7* %ap, align 16
	%b = load %i32vec7* %bp, align 16
	%x = add %i32vec7 %a, %b
	store %i32vec7 %x, %i32vec7* %ret, align 16
	ret void
}

%i32vec12 = type <12 x i32>
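; <12 x i32> is exactly three full <4 x i32> registers, so the checks expect
; only whole movaps and paddd operations, with no scalar fixup.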
define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) {
; CHECK: movaps
; CHECK: movaps
; CHECK: movaps
; CHECK: paddd
; CHECK: paddd
; CHECK: paddd
; CHECK: movaps
; CHECK: movaps
; CHECK: movaps
	%a = load %i32vec12* %ap, align 16
	%b = load %i32vec12* %bp, align 16
	%x = add %i32vec12 %a, %b
	store %i32vec12 %x, %i32vec12* %ret, align 16
	ret void
}


%i16vec3 = type <3 x i16>
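; <3 x i16> is widened for the paddw; the 6-byte result is stored with a movd
; (low two elements) plus a pextrw of the third.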
define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
; CHECK: movaps
; CHECK: paddw
; CHECK: movd
; CHECK: pextrw
	%a = load %i16vec3* %ap, align 16
	%b = load %i16vec3* %bp, align 16
	%x = add %i16vec3 %a, %b
	store %i16vec3 %x, %i16vec3* %ret, align 16
	ret void
}

%i16vec4 = type <4 x i16>
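; <4 x i16> widens to a legal 8-byte value, so a single movq covers the store.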
define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
; CHECK: movaps
; CHECK: paddw
; CHECK: movq
	%a = load %i16vec4* %ap, align 16
	%b = load %i16vec4* %bp, align 16
	%x = add %i16vec4 %a, %b
	store %i16vec4 %x, %i16vec4* %ret, align 16
	ret void
}

%i16vec12 = type <12 x i16>
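; <12 x i16> (24 bytes) splits into a 16-byte half and an 8-byte half, so the
; store side uses a movq plus a movaps.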
define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
; CHECK: movaps
; CHECK: movaps
; CHECK: paddw
; CHECK: paddw
; CHECK: movq
; CHECK: movaps
	%a = load %i16vec12* %ap, align 16
	%b = load %i16vec12* %bp, align 16
	%x = add %i16vec12 %a, %b
	store %i16vec12 %x, %i16vec12* %ret, align 16
	ret void
}

%i16vec18 = type <18 x i16>
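; <18 x i16> (36 bytes) needs two full movaps stores plus a movd for the final
; 4 bytes.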
define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
; CHECK: movaps
; CHECK: movaps
; CHECK: movaps
; CHECK: paddw
; CHECK: paddw
; CHECK: paddw
; CHECK: movd
; CHECK: movaps
; CHECK: movaps
	%a = load %i16vec18* %ap, align 16
	%b = load %i16vec18* %bp, align 16
	%x = add %i16vec18 %a, %b
	store %i16vec18 %x, %i16vec18* %ret, align 16
	ret void
}


%i8vec3 = type <3 x i8>
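; <3 x i8> is widened for the paddb; the checks expect the 3-byte result to be
; stored with scalar pextrb/movb operations.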
define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
; CHECK: movaps
; CHECK: paddb
; CHECK: pextrb
; CHECK: movb
	%a = load %i8vec3* %ap, align 16
	%b = load %i8vec3* %bp, align 16
	%x = add %i8vec3 %a, %b
	store %i8vec3 %x, %i8vec3* %ret, align 16
	ret void
}

%i8vec31 = type <31 x i8>
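; <31 x i8> splits across two registers; the checks expect two paddb ops and a
; piecewise store of the odd-sized tail (movq, pextrb, pextrw).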
define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
; CHECK: movaps
; CHECK: movaps
; CHECK: paddb
; CHECK: paddb
; CHECK: movq
; CHECK: pextrb
; CHECK: pextrw
	%a = load %i8vec31* %ap, align 16
	%b = load %i8vec31* %bp, align 16
	%x = add %i8vec31 %a, %b
	store %i8vec31 %x, %i8vec31* %ret, align 16
	ret void
}