// RUN: %clang_cc1 -fms-extensions -triple x86_64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=X64
// RUN: %clang_cc1 -fms-extensions -triple thumbv7-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM
// RUN: %clang_cc1 -fms-extensions -triple aarch64-windows-msvc %s -emit-llvm -o - | FileCheck %s --check-prefix=ARM
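
// This file tests the lowering of the MSVC bit-test intrinsics. On x64 both
// the plain and the interlocked forms lower to inline asm built around the
// bt/btc/btr/bts instructions plus setc; on Arm targets they are open-coded
// in IR, with the interlocked forms using atomicrmw.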

volatile unsigned char sink = 0;
void test32(long *base, long idx) {
  sink = _bittest(base, idx);
  sink = _bittestandcomplement(base, idx);
  sink = _bittestandreset(base, idx);
  sink = _bittestandset(base, idx);
  sink = _interlockedbittestandreset(base, idx);
  sink = _interlockedbittestandset(base, idx);
  sink = _interlockedbittestandset(base, idx);
}

void test64(__int64 *base, __int64 idx) {
  sink = _bittest64(base, idx);
  sink = _bittestandcomplement64(base, idx);
  sink = _bittestandreset64(base, idx);
  sink = _bittestandset64(base, idx);
  sink = _interlockedbittestandreset64(base, idx);
  sink = _interlockedbittestandset64(base, idx);
}

#if defined(_M_ARM) || defined(_M_ARM64)
void test_arm(long *base, long idx) {
  sink = _interlockedbittestandreset_acq(base, idx);
  sink = _interlockedbittestandreset_rel(base, idx);
  sink = _interlockedbittestandreset_nf(base, idx);
  sink = _interlockedbittestandset_acq(base, idx);
  sink = _interlockedbittestandset_rel(base, idx);
  sink = _interlockedbittestandset_nf(base, idx);
}
#endif
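
// For reference, the Arm expansion checked below open-codes each intrinsic
// roughly like the following C sketch (illustrative only, which is why it
// stays in a comment; the names are invented here, and the interlocked forms
// do the read-modify-write atomically via atomicrmw rather than a separate
// load and store):
//
//   unsigned char *byteaddr = (unsigned char *)base + (idx >> 3);
//   unsigned char mask = 1 << (idx & 7);
//   unsigned char byte = *byteaddr;  // plain _bittest stops after the load
//   *byteaddr = byte | mask;         // set; complement uses ^=, reset uses &= ~mask
//   sink = (byte >> (idx & 7)) & 1;  // the result is the bit's previous value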

// X64-LABEL: define dso_local void @test32(i32* %base, i32 %idx)
// X64: call i8 asm sideeffect "btl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
// X64: call i8 asm sideeffect "btcl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
// X64: call i8 asm sideeffect "btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
// X64: call i8 asm sideeffect "btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
// X64: call i8 asm sideeffect "lock btrl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})
// X64: call i8 asm sideeffect "lock btsl $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i32* %{{.*}}, i32 {{.*}})

// X64-LABEL: define dso_local void @test64(i64* %base, i64 %idx)
// X64: call i8 asm sideeffect "btq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
// X64: call i8 asm sideeffect "btcq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
// X64: call i8 asm sideeffect "btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
// X64: call i8 asm sideeffect "btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
// X64: call i8 asm sideeffect "lock btrq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})
// X64: call i8 asm sideeffect "lock btsq $2, ($1)\0A\09setc ${0:b}", "=r,r,r,~{{.*}}"(i64* %{{.*}}, i64 {{.*}})

// ARM-LABEL: define dso_local {{.*}}void @test32(i32* %base, i32 %idx)
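// _bittest expansion: a plain byte load, then shift and mask to extract the bit.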
// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1

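// _bittestandcomplement expansion: same addressing, with an xor to toggle the bit.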
// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
// ARM: %[[NEWBYTE:[^ ]*]] = xor i8 %[[BYTE]], %[[MASK]]
// ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1
// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1

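// _bittestandreset expansion: clears the bit by anding with the inverted mask.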
// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
// ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
// ARM: %[[NEWBYTE:[^ ]*]] = and i8 %[[BYTE]], %[[NOTMASK]]
// ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1
// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1

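// _bittestandset expansion: sets the bit by oring in the mask.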
// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
// ARM: %[[BYTE:[^ ]*]] = load i8, i8* %[[BYTEADDR]], align 1
// ARM: %[[NEWBYTE:[^ ]*]] = or i8 %[[BYTE]], %[[MASK]]
// ARM: store i8 %[[NEWBYTE]], i8* %[[BYTEADDR]], align 1
// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1

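// _interlockedbittestandreset expansion: the read-modify-write becomes a
// seq_cst atomicrmw and with the inverted mask.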
// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
// ARM: %[[NOTMASK:[^ ]*]] = xor i8 %[[MASK]], -1
// ARM: %[[BYTE:[^ ]*]] = atomicrmw and i8* %[[BYTEADDR]], i8 %[[NOTMASK]] seq_cst
// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1

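// _interlockedbittestandset expansion: a seq_cst atomicrmw or with the mask.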
// ARM: %[[IDXHI:[^ ]*]] = ashr i32 %{{.*}}, 3
// ARM: %[[BASE:[^ ]*]] = bitcast i32* %{{.*}} to i8*
// ARM: %[[BYTEADDR:[^ ]*]] = getelementptr inbounds i8, i8* %[[BASE]], i32 %[[IDXHI]]
// ARM: %[[IDX8:[^ ]*]] = trunc i32 %{{.*}} to i8
// ARM: %[[IDXLO:[^ ]*]] = and i8 %[[IDX8]], 7
// ARM: %[[MASK:[^ ]*]] = shl i8 1, %[[IDXLO]]
// ARM: %[[BYTE:[^ ]*]] = atomicrmw or i8* %[[BYTEADDR]], i8 %[[MASK]] seq_cst
// ARM: %[[BYTESHR:[^ ]*]] = lshr i8 %[[BYTE]], %[[IDXLO]]
// ARM: %[[RES:[^ ]*]] = and i8 %[[BYTESHR]], 1
// ARM: store volatile i8 %[[RES]], i8* @sink, align 1

// Just look for the atomicrmw instructions.

// ARM-LABEL: define dso_local {{.*}}void @test_arm(i32* %base, i32 %idx)
// ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} acquire
// ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} release
// ARM: atomicrmw and i8* %{{.*}}, i8 {{.*}} monotonic
// ARM: atomicrmw or i8* %{{.*}}, i8 {{.*}} acquire
// ARM: atomicrmw or i8* %{{.*}}, i8 {{.*}} release
// ARM: atomicrmw or i8* %{{.*}}, i8 {{.*}} monotonic
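
// The _acq, _rel, and _nf ("no fence") suffixes of the interlocked intrinsics
// map to the acquire, release, and monotonic atomic orderings, respectively.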