; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck --check-prefix=SSE %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck --check-prefix=AVX %s

; Check that the preserve_allcc prologue/epilogue spills every GPR (except
; R11 and RSP) and all 16 vector registers, since the inline asm below
; clobbers all of them. SSE targets spill XMM; AVX targets spill full YMM.
define preserve_allcc void @preserve_allcc1() nounwind {
entry:
;SSE-LABEL: preserve_allcc1
;SSE: pushq %r10
;SSE-NEXT: pushq %r9
;SSE-NEXT: pushq %r8
;SSE-NEXT: pushq %rdi
;SSE-NEXT: pushq %rsi
;SSE-NEXT: pushq %rdx
;SSE-NEXT: pushq %rcx
;SSE-NEXT: pushq %rax
;SSE-NEXT: pushq %rbp
;SSE-NEXT: pushq %r15
;SSE-NEXT: pushq %r14
;SSE-NEXT: pushq %r13
;SSE-NEXT: pushq %r12
;SSE-NEXT: pushq %rbx
;SSE: movaps %xmm15
;SSE-NEXT: movaps %xmm14
;SSE-NEXT: movaps %xmm13
;SSE-NEXT: movaps %xmm12
;SSE-NEXT: movaps %xmm11
;SSE-NEXT: movaps %xmm10
;SSE-NEXT: movaps %xmm9
;SSE-NEXT: movaps %xmm8
;SSE-NEXT: movaps %xmm7
;SSE-NEXT: movaps %xmm6
;SSE-NEXT: movaps %xmm5
;SSE-NEXT: movaps %xmm4
;SSE-NEXT: movaps %xmm3
;SSE-NEXT: movaps %xmm2
;SSE-NEXT: movaps %xmm1
;SSE-NEXT: movaps %xmm0
;AVX-LABEL: preserve_allcc1
;AVX: pushq %r10
;AVX-NEXT: pushq %r9
;AVX-NEXT: pushq %r8
;AVX-NEXT: pushq %rdi
;AVX-NEXT: pushq %rsi
;AVX-NEXT: pushq %rdx
;AVX-NEXT: pushq %rcx
;AVX-NEXT: pushq %rax
;AVX-NEXT: pushq %rbp
;AVX-NEXT: pushq %r15
;AVX-NEXT: pushq %r14
;AVX-NEXT: pushq %r13
;AVX-NEXT: pushq %r12
;AVX-NEXT: pushq %rbx
;AVX: vmovups %ymm15
;AVX-NEXT: vmovups %ymm14
;AVX-NEXT: vmovups %ymm13
;AVX-NEXT: vmovups %ymm12
;AVX-NEXT: vmovups %ymm11
;AVX-NEXT: vmovups %ymm10
;AVX-NEXT: vmovups %ymm9
;AVX-NEXT: vmovups %ymm8
;AVX-NEXT: vmovups %ymm7
;AVX-NEXT: vmovups %ymm6
;AVX-NEXT: vmovups %ymm5
;AVX-NEXT: vmovups %ymm4
;AVX-NEXT: vmovups %ymm3
;AVX-NEXT: vmovups %ymm2
;AVX-NEXT: vmovups %ymm1
;AVX-NEXT: vmovups %ymm0
  ; Clobber every GPR and XMM register so the calling convention is forced
  ; to save/restore all of them around this point.
  call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
  ret void
}
; Make sure only R11 is saved before the call: from the caller's point of
; view, a preserve_allcc callee preserves everything except R11, so none of
; the live values below (held in otherwise caller-saved registers) need a
; spill around the call to @bar — FileCheck verifies no movaps spill occurs.
declare preserve_allcc void @bar(i64, i64, double, double)
define void @preserve_allcc2() nounwind {
entry:
;SSE-LABEL: preserve_allcc2
;SSE: movq %r11, [[REG:%[a-z0-9]+]]
;SSE-NOT: movaps %xmm
;SSE: movq [[REG]], %r11
  ; Pin live values into normally caller-saved GPRs and XMM registers.
  %a0 = call i64 asm sideeffect "", "={rax}"() nounwind
  %a1 = call i64 asm sideeffect "", "={rcx}"() nounwind
  %a2 = call i64 asm sideeffect "", "={rdx}"() nounwind
  %a3 = call i64 asm sideeffect "", "={r8}"() nounwind
  %a4 = call i64 asm sideeffect "", "={r9}"() nounwind
  %a5 = call i64 asm sideeffect "", "={r10}"() nounwind
  %a6 = call i64 asm sideeffect "", "={r11}"() nounwind
  %a10 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
  %a11 = call <2 x double> asm sideeffect "", "={xmm3}"() nounwind
  %a12 = call <2 x double> asm sideeffect "", "={xmm4}"() nounwind
  %a13 = call <2 x double> asm sideeffect "", "={xmm5}"() nounwind
  %a14 = call <2 x double> asm sideeffect "", "={xmm6}"() nounwind
  %a15 = call <2 x double> asm sideeffect "", "={xmm7}"() nounwind
  %a16 = call <2 x double> asm sideeffect "", "={xmm8}"() nounwind
  %a17 = call <2 x double> asm sideeffect "", "={xmm9}"() nounwind
  %a18 = call <2 x double> asm sideeffect "", "={xmm10}"() nounwind
  %a19 = call <2 x double> asm sideeffect "", "={xmm11}"() nounwind
  %a20 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
  %a21 = call <2 x double> asm sideeffect "", "={xmm13}"() nounwind
  %a22 = call <2 x double> asm sideeffect "", "={xmm14}"() nounwind
  %a23 = call <2 x double> asm sideeffect "", "={xmm15}"() nounwind
  call preserve_allcc void @bar(i64 1, i64 2, double 3.0, double 4.0)
  ; All pinned values must still be live (unspilled) after the call.
  call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
  ret void
}