// RUN: %clang_cc1 -triple mips-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW

#include <stdarg.h>

// A 16-byte GNU vector of four 32-bit ints, used below to exercise
// vector argument passing through va_arg.
typedef int v4i32 __attribute__ ((__vector_size__ (16)));

// Read one promoted 'int' variadic argument and return it.
// Names (%va, %v) are matched by the CHECK lines below; keep them stable.
int test_i32(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  int v = va_arg(va, int);
  va_end(va);

  return v;
}

// ALL-LABEL: define i32 @test_i32(i8*{{.*}} %fmt, ...)
//
// O32: %va = alloca i8*, align [[PTRALIGN:4]]
// N32: %va = alloca i8*, align [[PTRALIGN:4]]
// N64: %va = alloca i8*, align [[PTRALIGN:8]]
// ALL: [[V:%.*]] = alloca i32, align 4
// NEW: [[PROMOTION_TEMP:%.*]] = alloca i32, align 4
//
// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
// O32: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32]] [[CHUNKSIZE:4]]
// NEW: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32|i64]] [[CHUNKSIZE:8]]
//
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i32]]*
// O32: [[ARG:%.+]] = load i32, i32* [[AP_CAST]], align [[CHUNKALIGN:4]]
//
// N32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
// N32: [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
// N64: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
// N64: [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
// NEW: [[TMP2:%.+]] = trunc i64 [[TMP]] to i32
// NEW: store i32 [[TMP2]], i32* [[PROMOTION_TEMP]], align 4
// NEW: [[ARG:%.+]] = load i32, i32* [[PROMOTION_TEMP]], align 4
// ALL: store i32 [[ARG]], i32* [[V]], align 4
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: }

// Read one 'long long' variadic argument and return it. On O32 this
// forces an 8-byte realignment of the va pointer (checked below).
long long test_i64(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  long long v = va_arg(va, long long);
  va_end(va);

  return v;
}

// ALL-LABEL: define i64 @test_i64(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
//
// i64 is 8-byte aligned, while this is within O32's stack alignment there's no
// guarantee that the offset is still 8-byte aligned after earlier reads.
// O32: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
// O32: [[TMP2:%.+]] = add i32 [[TMP1]], 7
// O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
// O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
//
// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 8
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// ALL: [[AP_CAST:%.*]] = bitcast i8* [[AP_CUR]] to i64*
// ALL: [[ARG:%.+]] = load i64, i64* [[AP_CAST]], align 8
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: }

// Read one pointer variadic argument and return it. On N32 a pointer is
// narrower than the 8-byte va_arg chunk, so a promotion temporary is
// required (checked below).
char *test_ptr(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  char *v = va_arg(va, char *);
  va_end(va);

  return v;
}

// ALL-LABEL: define i8* @test_ptr(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
// ALL: [[V:%.*]] = alloca i8*, align [[PTRALIGN]]
// N32: [[AP_CAST:%.+]] = alloca i8*, align 4
// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] [[CHUNKSIZE]]
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// When the chunk size matches the pointer size, this is easy.
// O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
// N64: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
// Otherwise we need a promotion temporary.
// N32: [[TMP1:%.+]] = bitcast i8* [[AP_CUR]] to i64*
// N32: [[TMP2:%.+]] = load i64, i64* [[TMP1]], align 8
// N32: [[TMP3:%.+]] = trunc i64 [[TMP2]] to i32
// N32: [[PTR:%.+]] = inttoptr i32 [[TMP3]] to i8*
// N32: store i8* [[PTR]], i8** [[AP_CAST]], align 4
//
// ALL: [[ARG:%.+]] = load i8*, i8** [[AP_CAST]], align [[PTRALIGN]]
// ALL: store i8* [[ARG]], i8** [[V]], align [[PTRALIGN]]
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: }

Daniel Sanders8d36a612014-09-22 13:27:06 +0000126int test_v4i32(char *fmt, ...) {
127 va_list va;
128
129 va_start(va, fmt);
130 v4i32 v = va_arg(va, v4i32);
131 va_end(va);
132
133 return v[0];
134}

// ALL-LABEL: define i32 @test_v4i32(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
// ALL: [[V]] = alloca <4 x i32>, align 16
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA1]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
//
// Vectors are 16-byte aligned, however the O32 ABI has a maximum alignment of
// 8-bytes since the base of the stack is 8-byte aligned.
// O32: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
// O32: [[TMP2:%.+]] = add i32 [[TMP1]], 7
// O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
// O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
//
// NEW: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T]]
// NEW: [[TMP2:%.+]] = add [[INTPTR_T]] [[TMP1]], 15
// NEW: [[TMP3:%.+]] = and [[INTPTR_T]] [[TMP2]], -16
// NEW: [[AP_CUR:%.+]] = inttoptr [[INTPTR_T]] [[TMP3]] to i8*
//
// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 16
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// ALL: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to <4 x i32>*
// O32: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 8
// N64: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 16
// ALL: store <4 x i32> [[ARG]], <4 x i32>* [[V]], align 16
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: [[VECEXT:%.+]] = extractelement <4 x i32> {{.*}}, i32 0
// ALL: ret i32 [[VECEXT]]
// ALL: }