// RUN: %clang_cc1 -triple mips-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
// RUN: %clang_cc1 -triple mipsel-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=O32
// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm -target-abi n32 %s | FileCheck %s -check-prefix=ALL -check-prefix=N32 -check-prefix=NEW
// RUN: %clang_cc1 -triple mips64-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
// RUN: %clang_cc1 -triple mips64el-unknown-linux -o - -emit-llvm %s | FileCheck %s -check-prefix=ALL -check-prefix=N64 -check-prefix=NEW
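//
// The O32 runs exercise the 32-bit ABI, where va_arg walks the argument area
// in 4-byte chunks; the N32 and N64 runs (collectively NEW in the checks
// below) use 8-byte chunks. PTRALIGN, INTPTR_T, CHUNKSIZE and CHUNKALIGN are
// FileCheck pattern variables bound per run so that the same check lines can
// describe all three ABIs.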

#include <stdarg.h>

typedef int v4i32 __attribute__ ((__vector_size__ (16)));

int test_i32(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  int v = va_arg(va, int);
  va_end(va);

  return v;
}

// ALL-LABEL: define i32 @test_i32(i8*{{.*}} %fmt, ...)
//
// O32: %va = alloca i8*, align [[PTRALIGN:4]]
// N32: %va = alloca i8*, align [[PTRALIGN:4]]
// N64: %va = alloca i8*, align [[PTRALIGN:8]]
// ALL: [[V:%.*]] = alloca i32, align 4
// NEW: [[PROMOTION_TEMP:%.*]] = alloca i32, align 4
//
// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
// O32: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32]] [[CHUNKSIZE:4]]
// NEW: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T:i32|i64]] [[CHUNKSIZE:8]]
//
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i32]]*
// O32: [[ARG:%.+]] = load i32, i32* [[AP_CAST]], align [[CHUNKALIGN:4]]
//
// N32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
// N32: [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
// N64: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to [[CHUNK_T:i64]]*
// N64: [[TMP:%.+]] = load i64, i64* [[AP_CAST]], align [[CHUNKALIGN:8]]
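//
// On the NEW ABIs the chunk just loaded is a full 8 bytes, so the i32 is
// recovered by truncating it through a promotion temporary; roughly the
// equivalent of (a sketch, not the exact lowering):
//   int v = (int)*(long long *)ap;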
// NEW: [[TMP2:%.+]] = trunc i64 [[TMP]] to i32
// NEW: store i32 [[TMP2]], i32* [[PROMOTION_TEMP]], align 4
// NEW: [[ARG:%.+]] = load i32, i32* [[PROMOTION_TEMP]], align 4
// ALL: store i32 [[ARG]], i32* [[V]], align 4
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: }

long long test_i64(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  long long v = va_arg(va, long long);
  va_end(va);

  return v;
}

// ALL-LABEL: define i64 @test_i64(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
//
// i64 is 8-byte aligned; while this is within O32's stack alignment, there's
// no guarantee that the offset is still 8-byte aligned after earlier reads.
// O32: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
// O32: [[TMP2:%.+]] = add i32 [[TMP1]], 7
// O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
// O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
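//
// The four instructions above are the usual round-up-to-a-multiple-of-8
// sequence, roughly: ap = (char *)(((uintptr_t)ap + 7) & ~(uintptr_t)7);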
//
// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 8
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// ALL: [[AP_CAST:%.*]] = bitcast i8* [[AP_CUR]] to i64*
// ALL: [[ARG:%.+]] = load i64, i64* [[AP_CAST]], align 8
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: }

char *test_ptr(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  char *v = va_arg(va, char *);
  va_end(va);

  return v;
}

// ALL-LABEL: define i8* @test_ptr(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
// ALL: [[V:%.*]] = alloca i8*, align [[PTRALIGN]]
// N32: [[AP_CAST:%.+]] = alloca i8*, align 4
// ALL: [[VA:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] [[CHUNKSIZE]]
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// When the chunk size matches the pointer size, this is easy.
// O32: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
// N64: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to i8**
// Otherwise we need a promotion temporary.
// N32: [[TMP1:%.+]] = bitcast i8* [[AP_CUR]] to i64*
// N32: [[TMP2:%.+]] = load i64, i64* [[TMP1]], align 8
// N32: [[TMP3:%.+]] = trunc i64 [[TMP2]] to i32
// N32: [[PTR:%.+]] = inttoptr i32 [[TMP3]] to i8*
// N32: store i8* [[PTR]], i8** [[AP_CAST]], align 4
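//
// That is, on N32 the 32-bit pointer is recovered from the low half of the
// 8-byte chunk; roughly (a sketch, not the exact lowering):
//   char *v = (char *)(unsigned)*(unsigned long long *)ap;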
//
// ALL: [[ARG:%.+]] = load i8*, i8** [[AP_CAST]], align [[PTRALIGN]]
// ALL: store i8* [[ARG]], i8** [[V]], align [[PTRALIGN]]
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: }

int test_v4i32(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  v4i32 v = va_arg(va, v4i32);
  va_end(va);

  return v[0];
}

// ALL-LABEL: define i32 @test_v4i32(i8*{{.*}} %fmt, ...)
//
// ALL: %va = alloca i8*, align [[PTRALIGN]]
// ALL: [[V]] = alloca <4 x i32>, align 16
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_start(i8* [[VA1]])
// ALL: [[AP_CUR:%.+]] = load i8*, i8** %va, align [[PTRALIGN]]
//
// Vectors are 16-byte aligned; however, the O32 ABI has a maximum alignment
// of 8 bytes since the base of the stack is 8-byte aligned.
// O32: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to i32
// O32: [[TMP2:%.+]] = add i32 [[TMP1]], 7
// O32: [[TMP3:%.+]] = and i32 [[TMP2]], -8
// O32: [[AP_CUR:%.+]] = inttoptr i32 [[TMP3]] to i8*
//
// NEW: [[TMP1:%.+]] = ptrtoint i8* [[AP_CUR]] to [[INTPTR_T]]
// NEW: [[TMP2:%.+]] = add [[INTPTR_T]] [[TMP1]], 15
// NEW: [[TMP3:%.+]] = and [[INTPTR_T]] [[TMP2]], -16
// NEW: [[AP_CUR:%.+]] = inttoptr [[INTPTR_T]] [[TMP3]] to i8*
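//
// Both sequences above are the same round-up pattern with different
// alignments, roughly:
//   ap = (char *)(((uintptr_t)ap + align - 1) & ~(uintptr_t)(align - 1));
// with align == 8 for O32 and align == 16 for the NEW ABIs.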
//
// ALL: [[AP_NEXT:%.+]] = getelementptr inbounds i8, i8* [[AP_CUR]], [[INTPTR_T]] 16
// ALL: store i8* [[AP_NEXT]], i8** %va, align [[PTRALIGN]]
//
// ALL: [[AP_CAST:%.+]] = bitcast i8* [[AP_CUR]] to <4 x i32>*
// O32: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 8
// N64: [[ARG:%.+]] = load <4 x i32>, <4 x i32>* [[AP_CAST]], align 16
// ALL: store <4 x i32> [[ARG]], <4 x i32>* [[V]], align 16
//
// ALL: [[VA1:%.+]] = bitcast i8** %va to i8*
// ALL: call void @llvm.va_end(i8* [[VA1]])
// ALL: [[VECEXT:%.+]] = extractelement <4 x i32> {{.*}}, i32 0
// ALL: ret i32 [[VECEXT]]
// ALL: }