; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s

; Note: This test checks that the lowering for atomics matches what we
; currently emit for non-atomics plus the atomic restriction. The presence of
; a particular lowering detail in these tests should not be read as requiring
; that detail for correctness unless it is related to the atomicity itself.
; (Specifically, there were reviewer questions about the lowering for half
; values and their calling convention which remain unresolved.)
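;
; Reminder: an unordered atomic only guarantees that the access itself is
; single-copy atomic; it imposes no ordering constraints. On x86, naturally
; aligned loads and stores of up to 8 bytes are already atomic, so most of
; the cases below lower to plain moves.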

define void @store_half(half* %fptr, half %v) {
; CHECK-LABEL: @store_half
; CHECK: movq %rdi, %rbx
; CHECK: callq __gnu_f2h_ieee
; CHECK: movw %ax, (%rbx)
  store atomic half %v, half* %fptr unordered, align 2
  ret void
}
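; Note: the half argument appears to be passed as a float here (see the
; calling-convention caveat above), so the store first calls the f32->f16
; conversion routine and then performs a plain 2-byte mov, which is atomic at
; this alignment.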

define void @store_float(float* %fptr, float %v) {
; CHECK-LABEL: @store_float
; CHECK: movd %xmm0, %eax
; CHECK: movl %eax, (%rdi)
  store atomic float %v, float* %fptr unordered, align 4
  ret void
}
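; Note: the value is first moved from %xmm0 into a GPR and stored from there;
; atomic FP stores are lowered by bitcasting to the integer type of the same
; width.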

define void @store_double(double* %fptr, double %v) {
; CHECK-LABEL: @store_double
; CHECK: movd %xmm0, %rax
; CHECK: movq %rax, (%rdi)
  store atomic double %v, double* %fptr unordered, align 8
  ret void
}

define void @store_fp128(fp128* %fptr, fp128 %v) {
; CHECK-LABEL: @store_fp128
; CHECK: callq __sync_lock_test_and_set_16
  store atomic fp128 %v, fp128* %fptr unordered, align 16
  ret void
}
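; Note: x86-64 has no 16-byte atomic store instruction (short of cmpxchg16b,
; which is not enabled by -mattr=sse2), so the 16-byte store is expanded to a
; __sync_* libcall.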

define half @load_half(half* %fptr) {
; CHECK-LABEL: @load_half
; CHECK: movw (%rdi), %ax
; CHECK: movzwl %ax, %edi
; CHECK: jmp __gnu_h2f_ieee
  %v = load atomic half, half* %fptr unordered, align 2
  ret half %v
}
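; Note: the 2-byte load itself is atomic; the tail call is just the f16->f32
; conversion needed to return the half in %xmm0.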

define float @load_float(float* %fptr) {
; CHECK-LABEL: @load_float
; CHECK: movl (%rdi), %eax
; CHECK: movd %eax, %xmm0
  %v = load atomic float, float* %fptr unordered, align 4
  ret float %v
}

define double @load_double(double* %fptr) {
; CHECK-LABEL: @load_double
; CHECK: movq (%rdi), %rax
; CHECK: movd %rax, %xmm0
  %v = load atomic double, double* %fptr unordered, align 8
  ret double %v
}

define fp128 @load_fp128(fp128* %fptr) {
; CHECK-LABEL: @load_fp128
; CHECK: callq __sync_val_compare_and_swap_16
  %v = load atomic fp128, fp128* %fptr unordered, align 16
  ret fp128 %v
}
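; Note: the 16-byte atomic load is likewise expanded to a libcall; a
; compare-and-swap returns the prior value of the location, so it can be used
; to read it atomically.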


; Sanity check the seq_cst lowering, since that's the interesting one from an
; ordering perspective on x86.

define void @store_float_seq_cst(float* %fptr, float %v) {
; CHECK-LABEL: @store_float_seq_cst
; CHECK: movd %xmm0, %eax
; CHECK: xchgl %eax, (%rdi)
  store atomic float %v, float* %fptr seq_cst, align 4
  ret void
}
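; Note: the seq_cst stores use xchg rather than a plain mov; xchg with a
; memory operand is implicitly locked and acts as a full fence.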

define void @store_double_seq_cst(double* %fptr, double %v) {
; CHECK-LABEL: @store_double_seq_cst
; CHECK: movd %xmm0, %rax
; CHECK: xchgq %rax, (%rdi)
  store atomic double %v, double* %fptr seq_cst, align 8
  ret void
}

define float @load_float_seq_cst(float* %fptr) {
; CHECK-LABEL: @load_float_seq_cst
; CHECK: movl (%rdi), %eax
; CHECK: movd %eax, %xmm0
  %v = load atomic float, float* %fptr seq_cst, align 4
  ret float %v
}

define double @load_double_seq_cst(double* %fptr) {
; CHECK-LABEL: @load_double_seq_cst
; CHECK: movq (%rdi), %rax
; CHECK: movd %rax, %xmm0
  %v = load atomic double, double* %fptr seq_cst, align 8
  ret double %v
}
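
; Note: no fence is needed for the seq_cst loads; under the x86 memory model a
; plain mov suffices because the required ordering is enforced on the store
; side.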