// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE

// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaveopt -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaveopt -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT

// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsavec -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsavec -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC

// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaves -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaves -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES

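// Each builtin below takes a pointer to the XSAVE area and a 64-bit feature
// mask. On this 32-bit target the mask is split into its high and low 32-bit
// halves (lshr + trunc), matching the EDX:EAX operand pair of the hardware
// instructions, before the corresponding LLVM intrinsic is called.
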
void test() {
  unsigned long long tmp_ULLi;
  void*              tmp_vp;

#ifdef TEST_XSAVE
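// xsave/xrstor save and restore the enabled state components using the
// standard (non-compacted) save-area layout.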
// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);

// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
  (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
#endif

#ifdef TEST_XSAVEOPT
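// xsaveopt may skip writing state components that are in their initial
// configuration or unmodified since the last xrstor.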
// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
#endif

#ifdef TEST_XSAVEC
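// xsavec writes the save area using the compacted format.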
// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
#endif

#ifdef TEST_XSAVES
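// xsaves/xrstors also handle supervisor state components and use the
// compacted format.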
// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
  (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);

// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
  (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
#endif
}