; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+bmi,+bmi2 < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; Stack reload folding tests.
;
; By including a nop inline asm call with side effects that clobbers all general-purpose
; registers, we force the relevant values to be spilled across the call and check that the
; reload is correctly folded into the instruction as a memory operand.

define i32 @stack_fold_bzhi_u32(i32 %a0, i32 %a1) {
  ;CHECK-LABEL: stack_fold_bzhi_u32
  ;CHECK: bzhil %eax, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a0, i32 %a1)
  ret i32 %2
}
declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)

define i64 @stack_fold_bzhi_u64(i64 %a0, i64 %a1) {
  ;CHECK-LABEL: stack_fold_bzhi_u64
  ;CHECK: bzhiq %rax, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a0, i64 %a1)
  ret i64 %2
}
declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)

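; Note: MULX has no intrinsic call here; it is selected from the i128 widening multiply
; below, and the CHECK verifies the reloaded source operand is folded as a memory operand.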
define i64 @stack_fold_mulx_u64(i64 %a0, i64 %a1, i64 *%a2) {
  ;CHECK-LABEL: stack_fold_mulx_u64
  ;CHECK: mulxq {{-?[0-9]*}}(%rsp), %rax, %rcx {{.*#+}} 8-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = zext i64 %a0 to i128
  %3 = zext i64 %a1 to i128
  %4 = mul i128 %2, %3
  %5 = lshr i128 %4, 64
  %6 = trunc i128 %4 to i64
  %7 = trunc i128 %5 to i64
  store i64 %7, i64 *%a2
  ret i64 %6
}

define i32 @stack_fold_pdep_u32(i32 %a0, i32 %a1) {
  ;CHECK-LABEL: stack_fold_pdep_u32
  ;CHECK: pdepl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
  ret i32 %2
}
declare i32 @llvm.x86.bmi.pdep.32(i32, i32)

define i64 @stack_fold_pdep_u64(i64 %a0, i64 %a1) {
  ;CHECK-LABEL: stack_fold_pdep_u64
  ;CHECK: pdepq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
  ret i64 %2
}
declare i64 @llvm.x86.bmi.pdep.64(i64, i64)

define i32 @stack_fold_pext_u32(i32 %a0, i32 %a1) {
  ;CHECK-LABEL: stack_fold_pext_u32
  ;CHECK: pextl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
  %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
  ret i32 %2
}
declare i32 @llvm.x86.bmi.pext.32(i32, i32)

define i64 @stack_fold_pext_u64(i64 %a0, i64 %a1) {
  ;CHECK-LABEL: stack_fold_pext_u64
  ;CHECK: pextq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
  %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
  ret i64 %2
}
declare i64 @llvm.x86.bmi.pext.64(i64, i64)