; RUN: llc -mtriple=arm-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A %s
; RUN: llc -mtriple=thumb-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A-THUMB %s
; RUN: llc -mtriple=thumb-apple-darwin -mcpu=cortex-m3 -o - %s | FileCheck --check-prefix=CHECK-M %s

declare arm_aapcscc void @bar()

@bigvar = global [16 x i32] zeroinitializer

define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
  ; Must save all registers except banked sp and lr (we save lr anyway because
  ; we actually need it at the end to execute the return ourselves).

  ; Also need special function return setting pc and CPSR simultaneously.
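  ; ("subs pc, lr, #imm" does both jobs in one instruction: it writes lr-imm
  ; to pc and copies SPSR back into CPSR. The IRQ return offset is 4, hence
  ; the "subs pc, lr, #4" checked below.)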
; CHECK-A-LABEL: irq_fn:
; CHECK-A: push {r0, r1, r2, r3, r11, lr}
; CHECK-A: add r11, sp, #16
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; CHECK-A: bl bar
; CHECK-A: sub sp, r11, #16
; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
; CHECK-A: subs pc, lr, #4

; CHECK-A-THUMB-LABEL: irq_fn:
; CHECK-A-THUMB: push {r0, r1, r2, r3, r4, r7, lr}
; CHECK-A-THUMB: mov r4, sp
; CHECK-A-THUMB: add r7, sp, #20
; CHECK-A-THUMB: bic r4, r4, #7
; CHECK-A-THUMB: bl bar
; CHECK-A-THUMB: sub.w r4, r7, #20
; CHECK-A-THUMB: mov sp, r4
; CHECK-A-THUMB: pop.w {r0, r1, r2, r3, r4, r7, lr}
; CHECK-A-THUMB: subs pc, lr, #4

  ; Normal AAPCS function (r0-r3 pushed onto stack by hardware, lr set to
  ; appropriate sentinel so no special return needed).
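  ; (M-class hardware stacks r0-r3, r12, lr, pc and xPSR on exception entry
  ; and loads lr with an EXC_RETURN value, so popping that value into pc is
  ; what performs the exception return.)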
; CHECK-M: push {r4, r7, lr}
; CHECK-M: add r7, sp, #4
; CHECK-M: sub sp, #4
; CHECK-M: mov r4, sp
; CHECK-M: mov sp, r4
; CHECK-M: blx _bar
; CHECK-M: subs r4, r7, #4
; CHECK-M: mov sp, r4
; CHECK-M: pop {r4, r7, pc}

  call arm_aapcscc void @bar()
  ret void
}
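
; A source-level sketch of how a handler like irq_fn might be written (for
; illustration only, assuming clang's ARM interrupt attribute rather than
; hand-written IR):
;   __attribute__((interrupt("IRQ"))) void irq_fn(void) { bar(); }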

define arm_aapcscc void @fiq_fn() alignstack(8) "interrupt"="FIQ" {
; CHECK-A-LABEL: fiq_fn:
; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
  ; 32 to get past r0, r1, ..., r7
; CHECK-A: add r11, sp, #32
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
  ; 32 must match above
; CHECK-A: sub sp, r11, #32
; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
; CHECK-A: subs pc, lr, #4

  %val = load volatile [16 x i32]* @bigvar
  store volatile [16 x i32] %val, [16 x i32]* @bigvar
  ret void
}
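
; FIQ mode has its own banked copies of r8-r14, so only r0-r7 of the
; interrupted context can be clobbered; that is why fiq_fn pushes just r0-r7
; (plus the r11/lr the handler itself uses), unlike swi_fn below where every
; scratched register must be preserved.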

define arm_aapcscc void @swi_fn() alignstack(8) "interrupt"="SWI" {
; CHECK-A-LABEL: swi_fn:
; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-A: add r11, sp, #44
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
; CHECK-A: sub sp, r11, #44
; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-A: subs pc, lr, #0

  %val = load volatile [16 x i32]* @bigvar
  store volatile [16 x i32] %val, [16 x i32]* @bigvar
  ret void
}
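
; For an SVC/SWI, lr already holds the address of the instruction after the
; SVC, so the return uses an offset of 0 ("subs pc, lr, #0") instead of the
; #4 adjustment needed for IRQ/FIQ.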

define arm_aapcscc void @undef_fn() alignstack(8) "interrupt"="UNDEF" {
; CHECK-A-LABEL: undef_fn:
; CHECK-A: push {r0, r1, r2, r3, r11, lr}
; CHECK-A: add r11, sp, #16
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
; CHECK-A: sub sp, r11, #16
; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
; CHECK-A: subs pc, lr, #0

  call arm_aapcscc void @bar()
  ret void
}

define arm_aapcscc void @abort_fn() alignstack(8) "interrupt"="ABORT" {
; CHECK-A-LABEL: abort_fn:
; CHECK-A: push {r0, r1, r2, r3, r11, lr}
; CHECK-A: add r11, sp, #16
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
; CHECK-A: sub sp, r11, #16
; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
; CHECK-A: subs pc, lr, #4

  call arm_aapcscc void @bar()
  ret void
}
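
; undef_fn and abort_fn have the same shape as irq_fn; the only difference
; checked above is the immediate in the final "subs pc, lr, #imm", i.e. the
; return-address adjustment appropriate to each exception type.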

@var = global double 0.0

; We don't save VFP regs, since it would be a massive overhead in the general
; case.
define arm_aapcscc void @floating_fn() alignstack(8) "interrupt"="IRQ" {
; CHECK-A-LABEL: floating_fn:
; CHECK-A-NOT: vpush
; CHECK-A-NOT: vstr
; CHECK-A-NOT: vstm
; CHECK-A: vadd.f64 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
  %lhs = load volatile double* @var
  %rhs = load volatile double* @var
  %sum = fadd double %lhs, %rhs
  store double %sum, double* @var
  ret void
}
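
; (The flip side of not saving VFP state: a handler such as floating_fn that
; touches d-registers will clobber the interrupted code's FP context unless
; that context is preserved by some other means.)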