Dan Gohman | fce288f | 2009-09-09 00:09:15 +0000 | [diff] [blame] | 1 | ; RUN: llc < %s -march=ppc64 |
Dale Johannesen | c4db727 | 2008-08-30 00:54:31 +0000 | [diff] [blame] | 2 | ; ModuleID = 'Atomics.c' |
| 3 | target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128" |
| 4 | target triple = "powerpc64-apple-darwin9" |
| 5 | @sc = common global i8 0 ; <i8*> [#uses=52] |
| 6 | @uc = common global i8 0 ; <i8*> [#uses=100] |
| 7 | @ss = common global i16 0 ; <i16*> [#uses=15] |
| 8 | @us = common global i16 0 ; <i16*> [#uses=15] |
| 9 | @si = common global i32 0 ; <i32*> [#uses=15] |
| 10 | @ui = common global i32 0 ; <i32*> [#uses=23] |
| 11 | @sl = common global i64 0, align 8 ; <i64*> [#uses=15] |
| 12 | @ul = common global i64 0, align 8 ; <i64*> [#uses=15] |
| 13 | @sll = common global i64 0, align 8 ; <i64*> [#uses=1] |
| 14 | @ull = common global i64 0, align 8 ; <i64*> [#uses=1] |
| 15 | |
; Exercise the legacy (pre-LLVM 3.0) atomic read-modify-write intrinsics
; when the fetched result is discarded: add, sub, or, xor, and, nand, each
; at widths i8/i16/i32/i64 against the globals defined above.
; NOTE(review): presumably generated from GCC __sync_fetch_and_* builtins
; (ModuleID is 'Atomics.c') -- confirm against the original C source.
; The instructions use implicit SSA numbering (%0..%83), so every
; value-producing instruction must keep its exact position and order.
define void @test_op_ignore() nounwind {
entry:
  ; --- atomic fetch-and-add, value 1, result unused ---
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:0 [#uses=0]
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:1 [#uses=0]
  ; each bitcast is a round-trip no-op (T* -> i8* -> T*) emitted by the
  ; front end; it only feeds the immediately following atomic call
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:2 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 )    ; <i16>:3 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:4 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 )    ; <i16>:5 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:6 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 )    ; <i32>:7 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:8 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 )    ; <i32>:9 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:10 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 )    ; <i64>:11 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:12 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 )    ; <i64>:13 [#uses=0]
  ; --- atomic fetch-and-sub, value 1, result unused ---
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:14 [#uses=0]
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:15 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:16 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 )    ; <i16>:17 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:18 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 )    ; <i16>:19 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:20 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 )    ; <i32>:21 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:22 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 )    ; <i32>:23 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:24 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 1 )    ; <i64>:25 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:26 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 1 )    ; <i64>:27 [#uses=0]
  ; --- atomic fetch-and-or, value 1, result unused ---
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:28 [#uses=0]
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:29 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:30 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 )    ; <i16>:31 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:32 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 )    ; <i16>:33 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:34 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 )    ; <i32>:35 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:36 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 )    ; <i32>:37 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:38 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 1 )    ; <i64>:39 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:40 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 1 )    ; <i64>:41 [#uses=0]
  ; --- atomic fetch-and-xor, value 1, result unused ---
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:42 [#uses=0]
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:43 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:44 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 )    ; <i16>:45 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:46 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 )    ; <i16>:47 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:48 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 )    ; <i32>:49 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:50 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 )    ; <i32>:51 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:52 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 1 )    ; <i64>:53 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:54 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 1 )    ; <i64>:55 [#uses=0]
  ; --- atomic fetch-and-and, value 1, result unused ---
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:56 [#uses=0]
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:57 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:58 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 )    ; <i16>:59 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:60 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 )    ; <i16>:61 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:62 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 )    ; <i32>:63 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:64 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 )    ; <i32>:65 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:66 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 1 )    ; <i64>:67 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:68 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 1 )    ; <i64>:69 [#uses=0]
  ; --- atomic fetch-and-nand, value 1, result unused ---
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:70 [#uses=0]
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:71 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:72 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 )    ; <i16>:73 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:74 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 )    ; <i16>:75 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:76 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 )    ; <i32>:77 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:78 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 )    ; <i32>:79 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:80 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 1 )    ; <i64>:81 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:82 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 1 )    ; <i64>:83 [#uses=0]
  br label %return

return:    ; preds = %entry
  ret void
}
| 107 | |
; Declarations of the legacy target-independent atomic read-modify-write
; intrinsics used by the test functions in this file: one overload per
; operation (add/sub/or/xor/and/nand) and per width (i8/i16/i32/i64).
; Each takes (pointer, operand) and returns the value previously stored.
; NOTE(review): this intrinsic family predates the 'atomicrmw' instruction
; and only parses with correspondingly old LLVM tools.
declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
| 155 | |
; Exercise the same legacy atomic intrinsics when the FETCHED (old) value
; is kept: each call's result is stored back to the corresponding global.
; Operand is 11 throughout.  NOTE(review): presumably generated from GCC
; __sync_fetch_and_* builtins whose result is used -- confirm against the
; original Atomics.c.  Implicit SSA numbering (%0..%83) must stay in order.
define void @test_fetch_and_op() nounwind {
entry:
  ; --- fetch-and-add: store the returned old value into the global ---
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:0 [#uses=1]
  store i8 %0, i8* @sc, align 1
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:1 [#uses=1]
  store i8 %1, i8* @uc, align 1
  ; the bitcasts below are front-end round-trip no-ops (T* -> i8* -> T*)
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:2 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 )    ; <i16>:3 [#uses=1]
  store i16 %3, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:4 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 )    ; <i16>:5 [#uses=1]
  store i16 %5, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:6 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 )    ; <i32>:7 [#uses=1]
  store i32 %7, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:8 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 )    ; <i32>:9 [#uses=1]
  store i32 %9, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:10 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 )    ; <i64>:11 [#uses=1]
  store i64 %11, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:12 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 )    ; <i64>:13 [#uses=1]
  store i64 %13, i64* @ul, align 8
  ; --- fetch-and-sub ---
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:14 [#uses=1]
  store i8 %14, i8* @sc, align 1
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:15 [#uses=1]
  store i8 %15, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:16 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 )    ; <i16>:17 [#uses=1]
  store i16 %17, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:18 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 )    ; <i16>:19 [#uses=1]
  store i16 %19, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:20 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 )    ; <i32>:21 [#uses=1]
  store i32 %21, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:22 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 )    ; <i32>:23 [#uses=1]
  store i32 %23, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:24 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 11 )    ; <i64>:25 [#uses=1]
  store i64 %25, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:26 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 11 )    ; <i64>:27 [#uses=1]
  store i64 %27, i64* @ul, align 8
  ; --- fetch-and-or ---
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:28 [#uses=1]
  store i8 %28, i8* @sc, align 1
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:29 [#uses=1]
  store i8 %29, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:30 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 )    ; <i16>:31 [#uses=1]
  store i16 %31, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:32 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 )    ; <i16>:33 [#uses=1]
  store i16 %33, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:34 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 )    ; <i32>:35 [#uses=1]
  store i32 %35, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:36 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 )    ; <i32>:37 [#uses=1]
  store i32 %37, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:38 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 11 )    ; <i64>:39 [#uses=1]
  store i64 %39, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:40 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 11 )    ; <i64>:41 [#uses=1]
  store i64 %41, i64* @ul, align 8
  ; --- fetch-and-xor ---
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:42 [#uses=1]
  store i8 %42, i8* @sc, align 1
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:43 [#uses=1]
  store i8 %43, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:44 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 )    ; <i16>:45 [#uses=1]
  store i16 %45, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:46 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 )    ; <i16>:47 [#uses=1]
  store i16 %47, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:48 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 )    ; <i32>:49 [#uses=1]
  store i32 %49, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:50 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 )    ; <i32>:51 [#uses=1]
  store i32 %51, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:52 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 11 )    ; <i64>:53 [#uses=1]
  store i64 %53, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:54 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 11 )    ; <i64>:55 [#uses=1]
  store i64 %55, i64* @ul, align 8
  ; --- fetch-and-and ---
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:56 [#uses=1]
  store i8 %56, i8* @sc, align 1
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:57 [#uses=1]
  store i8 %57, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:58 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 )    ; <i16>:59 [#uses=1]
  store i16 %59, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:60 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 )    ; <i16>:61 [#uses=1]
  store i16 %61, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:62 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 )    ; <i32>:63 [#uses=1]
  store i32 %63, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:64 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 )    ; <i32>:65 [#uses=1]
  store i32 %65, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:66 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 11 )    ; <i64>:67 [#uses=1]
  store i64 %67, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:68 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 11 )    ; <i64>:69 [#uses=1]
  store i64 %69, i64* @ul, align 8
  ; --- fetch-and-nand ---
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:70 [#uses=1]
  store i8 %70, i8* @sc, align 1
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:71 [#uses=1]
  store i8 %71, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:72 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 )    ; <i16>:73 [#uses=1]
  store i16 %73, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:74 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 )    ; <i16>:75 [#uses=1]
  store i16 %75, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:76 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 )    ; <i32>:77 [#uses=1]
  store i32 %77, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:78 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 )    ; <i32>:79 [#uses=1]
  store i32 %79, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:80 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 11 )    ; <i64>:81 [#uses=1]
  store i64 %81, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:82 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 11 )    ; <i64>:83 [#uses=1]
  store i64 %83, i64* @ul, align 8
  br label %return

return:    ; preds = %entry
  ret void
}
| 295 | |
| 296 | define void @test_op_and_fetch() nounwind { |
| 297 | entry: |
| 298 | load i8* @uc, align 1 ; <i8>:0 [#uses=2] |
| 299 | call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %0 ) ; <i8>:1 [#uses=1] |
| 300 | add i8 %1, %0 ; <i8>:2 [#uses=1] |
| 301 | store i8 %2, i8* @sc, align 1 |
| 302 | load i8* @uc, align 1 ; <i8>:3 [#uses=2] |
| 303 | call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %3 ) ; <i8>:4 [#uses=1] |
| 304 | add i8 %4, %3 ; <i8>:5 [#uses=1] |
| 305 | store i8 %5, i8* @uc, align 1 |
| 306 | load i8* @uc, align 1 ; <i8>:6 [#uses=1] |
| 307 | zext i8 %6 to i16 ; <i16>:7 [#uses=2] |
| 308 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:8 [#uses=1] |
| 309 | call i16 @llvm.atomic.load.add.i16.p0i16( i16* %8, i16 %7 ) ; <i16>:9 [#uses=1] |
| 310 | add i16 %9, %7 ; <i16>:10 [#uses=1] |
| 311 | store i16 %10, i16* @ss, align 2 |
| 312 | load i8* @uc, align 1 ; <i8>:11 [#uses=1] |
| 313 | zext i8 %11 to i16 ; <i16>:12 [#uses=2] |
| 314 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:13 [#uses=1] |
| 315 | call i16 @llvm.atomic.load.add.i16.p0i16( i16* %13, i16 %12 ) ; <i16>:14 [#uses=1] |
| 316 | add i16 %14, %12 ; <i16>:15 [#uses=1] |
| 317 | store i16 %15, i16* @us, align 2 |
| 318 | load i8* @uc, align 1 ; <i8>:16 [#uses=1] |
| 319 | zext i8 %16 to i32 ; <i32>:17 [#uses=2] |
| 320 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:18 [#uses=1] |
| 321 | call i32 @llvm.atomic.load.add.i32.p0i32( i32* %18, i32 %17 ) ; <i32>:19 [#uses=1] |
| 322 | add i32 %19, %17 ; <i32>:20 [#uses=1] |
| 323 | store i32 %20, i32* @si, align 4 |
| 324 | load i8* @uc, align 1 ; <i8>:21 [#uses=1] |
| 325 | zext i8 %21 to i32 ; <i32>:22 [#uses=2] |
| 326 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:23 [#uses=1] |
| 327 | call i32 @llvm.atomic.load.add.i32.p0i32( i32* %23, i32 %22 ) ; <i32>:24 [#uses=1] |
| 328 | add i32 %24, %22 ; <i32>:25 [#uses=1] |
| 329 | store i32 %25, i32* @ui, align 4 |
| 330 | load i8* @uc, align 1 ; <i8>:26 [#uses=1] |
| 331 | zext i8 %26 to i64 ; <i64>:27 [#uses=2] |
| 332 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1] |
| 333 | call i64 @llvm.atomic.load.add.i64.p0i64( i64* %28, i64 %27 ) ; <i64>:29 [#uses=1] |
| 334 | add i64 %29, %27 ; <i64>:30 [#uses=1] |
| 335 | store i64 %30, i64* @sl, align 8 |
| 336 | load i8* @uc, align 1 ; <i8>:31 [#uses=1] |
| 337 | zext i8 %31 to i64 ; <i64>:32 [#uses=2] |
| 338 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:33 [#uses=1] |
| 339 | call i64 @llvm.atomic.load.add.i64.p0i64( i64* %33, i64 %32 ) ; <i64>:34 [#uses=1] |
| 340 | add i64 %34, %32 ; <i64>:35 [#uses=1] |
| 341 | store i64 %35, i64* @ul, align 8 |
| 342 | load i8* @uc, align 1 ; <i8>:36 [#uses=2] |
| 343 | call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %36 ) ; <i8>:37 [#uses=1] |
| 344 | sub i8 %37, %36 ; <i8>:38 [#uses=1] |
| 345 | store i8 %38, i8* @sc, align 1 |
| 346 | load i8* @uc, align 1 ; <i8>:39 [#uses=2] |
| 347 | call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %39 ) ; <i8>:40 [#uses=1] |
| 348 | sub i8 %40, %39 ; <i8>:41 [#uses=1] |
| 349 | store i8 %41, i8* @uc, align 1 |
| 350 | load i8* @uc, align 1 ; <i8>:42 [#uses=1] |
| 351 | zext i8 %42 to i16 ; <i16>:43 [#uses=2] |
| 352 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1] |
| 353 | call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %44, i16 %43 ) ; <i16>:45 [#uses=1] |
| 354 | sub i16 %45, %43 ; <i16>:46 [#uses=1] |
| 355 | store i16 %46, i16* @ss, align 2 |
| 356 | load i8* @uc, align 1 ; <i8>:47 [#uses=1] |
| 357 | zext i8 %47 to i16 ; <i16>:48 [#uses=2] |
| 358 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:49 [#uses=1] |
| 359 | call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %49, i16 %48 ) ; <i16>:50 [#uses=1] |
| 360 | sub i16 %50, %48 ; <i16>:51 [#uses=1] |
| 361 | store i16 %51, i16* @us, align 2 |
| 362 | load i8* @uc, align 1 ; <i8>:52 [#uses=1] |
| 363 | zext i8 %52 to i32 ; <i32>:53 [#uses=2] |
| 364 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:54 [#uses=1] |
| 365 | call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %54, i32 %53 ) ; <i32>:55 [#uses=1] |
| 366 | sub i32 %55, %53 ; <i32>:56 [#uses=1] |
| 367 | store i32 %56, i32* @si, align 4 |
| 368 | load i8* @uc, align 1 ; <i8>:57 [#uses=1] |
| 369 | zext i8 %57 to i32 ; <i32>:58 [#uses=2] |
| 370 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:59 [#uses=1] |
| 371 | call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %59, i32 %58 ) ; <i32>:60 [#uses=1] |
| 372 | sub i32 %60, %58 ; <i32>:61 [#uses=1] |
| 373 | store i32 %61, i32* @ui, align 4 |
| 374 | load i8* @uc, align 1 ; <i8>:62 [#uses=1] |
| 375 | zext i8 %62 to i64 ; <i64>:63 [#uses=2] |
| 376 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1] |
| 377 | call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %64, i64 %63 ) ; <i64>:65 [#uses=1] |
| 378 | sub i64 %65, %63 ; <i64>:66 [#uses=1] |
| 379 | store i64 %66, i64* @sl, align 8 |
| 380 | load i8* @uc, align 1 ; <i8>:67 [#uses=1] |
| 381 | zext i8 %67 to i64 ; <i64>:68 [#uses=2] |
| 382 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:69 [#uses=1] |
| 383 | call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %69, i64 %68 ) ; <i64>:70 [#uses=1] |
| 384 | sub i64 %70, %68 ; <i64>:71 [#uses=1] |
| 385 | store i64 %71, i64* @ul, align 8 |
| 386 | load i8* @uc, align 1 ; <i8>:72 [#uses=2] |
| 387 | call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %72 ) ; <i8>:73 [#uses=1] |
| 388 | or i8 %73, %72 ; <i8>:74 [#uses=1] |
| 389 | store i8 %74, i8* @sc, align 1 |
| 390 | load i8* @uc, align 1 ; <i8>:75 [#uses=2] |
| 391 | call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %75 ) ; <i8>:76 [#uses=1] |
| 392 | or i8 %76, %75 ; <i8>:77 [#uses=1] |
| 393 | store i8 %77, i8* @uc, align 1 |
| 394 | load i8* @uc, align 1 ; <i8>:78 [#uses=1] |
| 395 | zext i8 %78 to i16 ; <i16>:79 [#uses=2] |
| 396 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:80 [#uses=1] |
| 397 | call i16 @llvm.atomic.load.or.i16.p0i16( i16* %80, i16 %79 ) ; <i16>:81 [#uses=1] |
| 398 | or i16 %81, %79 ; <i16>:82 [#uses=1] |
| 399 | store i16 %82, i16* @ss, align 2 |
| 400 | load i8* @uc, align 1 ; <i8>:83 [#uses=1] |
| 401 | zext i8 %83 to i16 ; <i16>:84 [#uses=2] |
| 402 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:85 [#uses=1] |
| 403 | call i16 @llvm.atomic.load.or.i16.p0i16( i16* %85, i16 %84 ) ; <i16>:86 [#uses=1] |
| 404 | or i16 %86, %84 ; <i16>:87 [#uses=1] |
| 405 | store i16 %87, i16* @us, align 2 |
| 406 | load i8* @uc, align 1 ; <i8>:88 [#uses=1] |
| 407 | zext i8 %88 to i32 ; <i32>:89 [#uses=2] |
| 408 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:90 [#uses=1] |
| 409 | call i32 @llvm.atomic.load.or.i32.p0i32( i32* %90, i32 %89 ) ; <i32>:91 [#uses=1] |
| 410 | or i32 %91, %89 ; <i32>:92 [#uses=1] |
| 411 | store i32 %92, i32* @si, align 4 |
| 412 | load i8* @uc, align 1 ; <i8>:93 [#uses=1] |
| 413 | zext i8 %93 to i32 ; <i32>:94 [#uses=2] |
| 414 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:95 [#uses=1] |
| 415 | call i32 @llvm.atomic.load.or.i32.p0i32( i32* %95, i32 %94 ) ; <i32>:96 [#uses=1] |
| 416 | or i32 %96, %94 ; <i32>:97 [#uses=1] |
| 417 | store i32 %97, i32* @ui, align 4 |
| 418 | load i8* @uc, align 1 ; <i8>:98 [#uses=1] |
| 419 | zext i8 %98 to i64 ; <i64>:99 [#uses=2] |
| 420 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1] |
| 421 | call i64 @llvm.atomic.load.or.i64.p0i64( i64* %100, i64 %99 ) ; <i64>:101 [#uses=1] |
| 422 | or i64 %101, %99 ; <i64>:102 [#uses=1] |
| 423 | store i64 %102, i64* @sl, align 8 |
| 424 | load i8* @uc, align 1 ; <i8>:103 [#uses=1] |
| 425 | zext i8 %103 to i64 ; <i64>:104 [#uses=2] |
| 426 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:105 [#uses=1] |
| 427 | call i64 @llvm.atomic.load.or.i64.p0i64( i64* %105, i64 %104 ) ; <i64>:106 [#uses=1] |
| 428 | or i64 %106, %104 ; <i64>:107 [#uses=1] |
| 429 | store i64 %107, i64* @ul, align 8 |
| 430 | load i8* @uc, align 1 ; <i8>:108 [#uses=2] |
| 431 | call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %108 ) ; <i8>:109 [#uses=1] |
| 432 | xor i8 %109, %108 ; <i8>:110 [#uses=1] |
| 433 | store i8 %110, i8* @sc, align 1 |
| 434 | load i8* @uc, align 1 ; <i8>:111 [#uses=2] |
| 435 | call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1] |
| 436 | xor i8 %112, %111 ; <i8>:113 [#uses=1] |
| 437 | store i8 %113, i8* @uc, align 1 |
| 438 | load i8* @uc, align 1 ; <i8>:114 [#uses=1] |
| 439 | zext i8 %114 to i16 ; <i16>:115 [#uses=2] |
| 440 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1] |
| 441 | call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %116, i16 %115 ) ; <i16>:117 [#uses=1] |
| 442 | xor i16 %117, %115 ; <i16>:118 [#uses=1] |
| 443 | store i16 %118, i16* @ss, align 2 |
| 444 | load i8* @uc, align 1 ; <i8>:119 [#uses=1] |
| 445 | zext i8 %119 to i16 ; <i16>:120 [#uses=2] |
| 446 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:121 [#uses=1] |
| 447 | call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %121, i16 %120 ) ; <i16>:122 [#uses=1] |
| 448 | xor i16 %122, %120 ; <i16>:123 [#uses=1] |
| 449 | store i16 %123, i16* @us, align 2 |
| 450 | load i8* @uc, align 1 ; <i8>:124 [#uses=1] |
| 451 | zext i8 %124 to i32 ; <i32>:125 [#uses=2] |
| 452 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:126 [#uses=1] |
| 453 | call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %126, i32 %125 ) ; <i32>:127 [#uses=1] |
| 454 | xor i32 %127, %125 ; <i32>:128 [#uses=1] |
| 455 | store i32 %128, i32* @si, align 4 |
| 456 | load i8* @uc, align 1 ; <i8>:129 [#uses=1] |
| 457 | zext i8 %129 to i32 ; <i32>:130 [#uses=2] |
| 458 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:131 [#uses=1] |
| 459 | call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %131, i32 %130 ) ; <i32>:132 [#uses=1] |
| 460 | xor i32 %132, %130 ; <i32>:133 [#uses=1] |
| 461 | store i32 %133, i32* @ui, align 4 |
| 462 | load i8* @uc, align 1 ; <i8>:134 [#uses=1] |
| 463 | zext i8 %134 to i64 ; <i64>:135 [#uses=2] |
| 464 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:136 [#uses=1] |
| 465 | call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %136, i64 %135 ) ; <i64>:137 [#uses=1] |
| 466 | xor i64 %137, %135 ; <i64>:138 [#uses=1] |
| 467 | store i64 %138, i64* @sl, align 8 |
| 468 | load i8* @uc, align 1 ; <i8>:139 [#uses=1] |
| 469 | zext i8 %139 to i64 ; <i64>:140 [#uses=2] |
| 470 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:141 [#uses=1] |
| 471 | call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %141, i64 %140 ) ; <i64>:142 [#uses=1] |
| 472 | xor i64 %142, %140 ; <i64>:143 [#uses=1] |
| 473 | store i64 %143, i64* @ul, align 8 |
| 474 | load i8* @uc, align 1 ; <i8>:144 [#uses=2] |
| 475 | call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %144 ) ; <i8>:145 [#uses=1] |
| 476 | and i8 %145, %144 ; <i8>:146 [#uses=1] |
| 477 | store i8 %146, i8* @sc, align 1 |
| 478 | load i8* @uc, align 1 ; <i8>:147 [#uses=2] |
| 479 | call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %147 ) ; <i8>:148 [#uses=1] |
| 480 | and i8 %148, %147 ; <i8>:149 [#uses=1] |
| 481 | store i8 %149, i8* @uc, align 1 |
| 482 | load i8* @uc, align 1 ; <i8>:150 [#uses=1] |
| 483 | zext i8 %150 to i16 ; <i16>:151 [#uses=2] |
| 484 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:152 [#uses=1] |
| 485 | call i16 @llvm.atomic.load.and.i16.p0i16( i16* %152, i16 %151 ) ; <i16>:153 [#uses=1] |
| 486 | and i16 %153, %151 ; <i16>:154 [#uses=1] |
| 487 | store i16 %154, i16* @ss, align 2 |
| 488 | load i8* @uc, align 1 ; <i8>:155 [#uses=1] |
| 489 | zext i8 %155 to i16 ; <i16>:156 [#uses=2] |
| 490 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:157 [#uses=1] |
| 491 | call i16 @llvm.atomic.load.and.i16.p0i16( i16* %157, i16 %156 ) ; <i16>:158 [#uses=1] |
| 492 | and i16 %158, %156 ; <i16>:159 [#uses=1] |
| 493 | store i16 %159, i16* @us, align 2 |
| 494 | load i8* @uc, align 1 ; <i8>:160 [#uses=1] |
| 495 | zext i8 %160 to i32 ; <i32>:161 [#uses=2] |
| 496 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:162 [#uses=1] |
| 497 | call i32 @llvm.atomic.load.and.i32.p0i32( i32* %162, i32 %161 ) ; <i32>:163 [#uses=1] |
| 498 | and i32 %163, %161 ; <i32>:164 [#uses=1] |
| 499 | store i32 %164, i32* @si, align 4 |
| 500 | load i8* @uc, align 1 ; <i8>:165 [#uses=1] |
| 501 | zext i8 %165 to i32 ; <i32>:166 [#uses=2] |
| 502 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:167 [#uses=1] |
| 503 | call i32 @llvm.atomic.load.and.i32.p0i32( i32* %167, i32 %166 ) ; <i32>:168 [#uses=1] |
| 504 | and i32 %168, %166 ; <i32>:169 [#uses=1] |
| 505 | store i32 %169, i32* @ui, align 4 |
| 506 | load i8* @uc, align 1 ; <i8>:170 [#uses=1] |
| 507 | zext i8 %170 to i64 ; <i64>:171 [#uses=2] |
| 508 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:172 [#uses=1] |
| 509 | call i64 @llvm.atomic.load.and.i64.p0i64( i64* %172, i64 %171 ) ; <i64>:173 [#uses=1] |
| 510 | and i64 %173, %171 ; <i64>:174 [#uses=1] |
| 511 | store i64 %174, i64* @sl, align 8 |
| 512 | load i8* @uc, align 1 ; <i8>:175 [#uses=1] |
| 513 | zext i8 %175 to i64 ; <i64>:176 [#uses=2] |
| 514 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:177 [#uses=1] |
| 515 | call i64 @llvm.atomic.load.and.i64.p0i64( i64* %177, i64 %176 ) ; <i64>:178 [#uses=1] |
| 516 | and i64 %178, %176 ; <i64>:179 [#uses=1] |
| 517 | store i64 %179, i64* @ul, align 8 |
| 518 | load i8* @uc, align 1 ; <i8>:180 [#uses=2] |
| 519 | call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %180 ) ; <i8>:181 [#uses=1] |
| 520 | xor i8 %181, -1 ; <i8>:182 [#uses=1] |
| 521 | and i8 %182, %180 ; <i8>:183 [#uses=1] |
| 522 | store i8 %183, i8* @sc, align 1 |
| 523 | load i8* @uc, align 1 ; <i8>:184 [#uses=2] |
| 524 | call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %184 ) ; <i8>:185 [#uses=1] |
| 525 | xor i8 %185, -1 ; <i8>:186 [#uses=1] |
| 526 | and i8 %186, %184 ; <i8>:187 [#uses=1] |
| 527 | store i8 %187, i8* @uc, align 1 |
| 528 | load i8* @uc, align 1 ; <i8>:188 [#uses=1] |
| 529 | zext i8 %188 to i16 ; <i16>:189 [#uses=2] |
| 530 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:190 [#uses=1] |
| 531 | call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %190, i16 %189 ) ; <i16>:191 [#uses=1] |
| 532 | xor i16 %191, -1 ; <i16>:192 [#uses=1] |
| 533 | and i16 %192, %189 ; <i16>:193 [#uses=1] |
| 534 | store i16 %193, i16* @ss, align 2 |
| 535 | load i8* @uc, align 1 ; <i8>:194 [#uses=1] |
| 536 | zext i8 %194 to i16 ; <i16>:195 [#uses=2] |
| 537 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:196 [#uses=1] |
| 538 | call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %196, i16 %195 ) ; <i16>:197 [#uses=1] |
| 539 | xor i16 %197, -1 ; <i16>:198 [#uses=1] |
| 540 | and i16 %198, %195 ; <i16>:199 [#uses=1] |
| 541 | store i16 %199, i16* @us, align 2 |
| 542 | load i8* @uc, align 1 ; <i8>:200 [#uses=1] |
| 543 | zext i8 %200 to i32 ; <i32>:201 [#uses=2] |
| 544 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:202 [#uses=1] |
| 545 | call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1] |
| 546 | xor i32 %203, -1 ; <i32>:204 [#uses=1] |
| 547 | and i32 %204, %201 ; <i32>:205 [#uses=1] |
| 548 | store i32 %205, i32* @si, align 4 |
| 549 | load i8* @uc, align 1 ; <i8>:206 [#uses=1] |
| 550 | zext i8 %206 to i32 ; <i32>:207 [#uses=2] |
| 551 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:208 [#uses=1] |
| 552 | call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %208, i32 %207 ) ; <i32>:209 [#uses=1] |
| 553 | xor i32 %209, -1 ; <i32>:210 [#uses=1] |
| 554 | and i32 %210, %207 ; <i32>:211 [#uses=1] |
| 555 | store i32 %211, i32* @ui, align 4 |
| 556 | load i8* @uc, align 1 ; <i8>:212 [#uses=1] |
| 557 | zext i8 %212 to i64 ; <i64>:213 [#uses=2] |
| 558 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:214 [#uses=1] |
| 559 | call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %214, i64 %213 ) ; <i64>:215 [#uses=1] |
| 560 | xor i64 %215, -1 ; <i64>:216 [#uses=1] |
| 561 | and i64 %216, %213 ; <i64>:217 [#uses=1] |
| 562 | store i64 %217, i64* @sl, align 8 |
| 563 | load i8* @uc, align 1 ; <i8>:218 [#uses=1] |
| 564 | zext i8 %218 to i64 ; <i64>:219 [#uses=2] |
| 565 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:220 [#uses=1] |
| 566 | call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %220, i64 %219 ) ; <i64>:221 [#uses=1] |
| 567 | xor i64 %221, -1 ; <i64>:222 [#uses=1] |
| 568 | and i64 %222, %219 ; <i64>:223 [#uses=1] |
| 569 | store i64 %223, i64* @ul, align 8 |
| 570 | br label %return |
| 571 | |
| 572 | return: ; preds = %entry |
| 573 | ret void |
| 574 | } |
| 575 | |
| 576 | define void @test_compare_and_swap() nounwind { |
| 577 | entry: |
| 578 | load i8* @uc, align 1 ; <i8>:0 [#uses=1] |
| 579 | load i8* @sc, align 1 ; <i8>:1 [#uses=1] |
| 580 | call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %0, i8 %1 ) ; <i8>:2 [#uses=1] |
| 581 | store i8 %2, i8* @sc, align 1 |
| 582 | load i8* @uc, align 1 ; <i8>:3 [#uses=1] |
| 583 | load i8* @sc, align 1 ; <i8>:4 [#uses=1] |
| 584 | call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %3, i8 %4 ) ; <i8>:5 [#uses=1] |
| 585 | store i8 %5, i8* @uc, align 1 |
| 586 | load i8* @uc, align 1 ; <i8>:6 [#uses=1] |
| 587 | zext i8 %6 to i16 ; <i16>:7 [#uses=1] |
| 588 | load i8* @sc, align 1 ; <i8>:8 [#uses=1] |
| 589 | sext i8 %8 to i16 ; <i16>:9 [#uses=1] |
| 590 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:10 [#uses=1] |
| 591 | call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %10, i16 %7, i16 %9 ) ; <i16>:11 [#uses=1] |
| 592 | store i16 %11, i16* @ss, align 2 |
| 593 | load i8* @uc, align 1 ; <i8>:12 [#uses=1] |
| 594 | zext i8 %12 to i16 ; <i16>:13 [#uses=1] |
| 595 | load i8* @sc, align 1 ; <i8>:14 [#uses=1] |
| 596 | sext i8 %14 to i16 ; <i16>:15 [#uses=1] |
| 597 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:16 [#uses=1] |
| 598 | call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %16, i16 %13, i16 %15 ) ; <i16>:17 [#uses=1] |
| 599 | store i16 %17, i16* @us, align 2 |
| 600 | load i8* @uc, align 1 ; <i8>:18 [#uses=1] |
| 601 | zext i8 %18 to i32 ; <i32>:19 [#uses=1] |
| 602 | load i8* @sc, align 1 ; <i8>:20 [#uses=1] |
| 603 | sext i8 %20 to i32 ; <i32>:21 [#uses=1] |
| 604 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:22 [#uses=1] |
| 605 | call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %22, i32 %19, i32 %21 ) ; <i32>:23 [#uses=1] |
| 606 | store i32 %23, i32* @si, align 4 |
| 607 | load i8* @uc, align 1 ; <i8>:24 [#uses=1] |
| 608 | zext i8 %24 to i32 ; <i32>:25 [#uses=1] |
| 609 | load i8* @sc, align 1 ; <i8>:26 [#uses=1] |
| 610 | sext i8 %26 to i32 ; <i32>:27 [#uses=1] |
| 611 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:28 [#uses=1] |
| 612 | call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %28, i32 %25, i32 %27 ) ; <i32>:29 [#uses=1] |
| 613 | store i32 %29, i32* @ui, align 4 |
| 614 | load i8* @uc, align 1 ; <i8>:30 [#uses=1] |
| 615 | zext i8 %30 to i64 ; <i64>:31 [#uses=1] |
| 616 | load i8* @sc, align 1 ; <i8>:32 [#uses=1] |
| 617 | sext i8 %32 to i64 ; <i64>:33 [#uses=1] |
| 618 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:34 [#uses=1] |
| 619 | call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %34, i64 %31, i64 %33 ) ; <i64>:35 [#uses=1] |
| 620 | store i64 %35, i64* @sl, align 8 |
| 621 | load i8* @uc, align 1 ; <i8>:36 [#uses=1] |
| 622 | zext i8 %36 to i64 ; <i64>:37 [#uses=1] |
| 623 | load i8* @sc, align 1 ; <i8>:38 [#uses=1] |
| 624 | sext i8 %38 to i64 ; <i64>:39 [#uses=1] |
| 625 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:40 [#uses=1] |
| 626 | call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %40, i64 %37, i64 %39 ) ; <i64>:41 [#uses=1] |
| 627 | store i64 %41, i64* @ul, align 8 |
| 628 | load i8* @uc, align 1 ; <i8>:42 [#uses=2] |
| 629 | load i8* @sc, align 1 ; <i8>:43 [#uses=1] |
| 630 | call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %42, i8 %43 ) ; <i8>:44 [#uses=1] |
| 631 | icmp eq i8 %44, %42 ; <i1>:45 [#uses=1] |
| 632 | zext i1 %45 to i8 ; <i8>:46 [#uses=1] |
| 633 | zext i8 %46 to i32 ; <i32>:47 [#uses=1] |
| 634 | store i32 %47, i32* @ui, align 4 |
| 635 | load i8* @uc, align 1 ; <i8>:48 [#uses=2] |
| 636 | load i8* @sc, align 1 ; <i8>:49 [#uses=1] |
| 637 | call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %48, i8 %49 ) ; <i8>:50 [#uses=1] |
| 638 | icmp eq i8 %50, %48 ; <i1>:51 [#uses=1] |
| 639 | zext i1 %51 to i8 ; <i8>:52 [#uses=1] |
| 640 | zext i8 %52 to i32 ; <i32>:53 [#uses=1] |
| 641 | store i32 %53, i32* @ui, align 4 |
| 642 | load i8* @uc, align 1 ; <i8>:54 [#uses=1] |
| 643 | zext i8 %54 to i16 ; <i16>:55 [#uses=2] |
| 644 | load i8* @sc, align 1 ; <i8>:56 [#uses=1] |
| 645 | sext i8 %56 to i16 ; <i16>:57 [#uses=1] |
| 646 | bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1] |
| 647 | call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %58, i16 %55, i16 %57 ) ; <i16>:59 [#uses=1] |
| 648 | icmp eq i16 %59, %55 ; <i1>:60 [#uses=1] |
| 649 | zext i1 %60 to i8 ; <i8>:61 [#uses=1] |
| 650 | zext i8 %61 to i32 ; <i32>:62 [#uses=1] |
| 651 | store i32 %62, i32* @ui, align 4 |
| 652 | load i8* @uc, align 1 ; <i8>:63 [#uses=1] |
| 653 | zext i8 %63 to i16 ; <i16>:64 [#uses=2] |
| 654 | load i8* @sc, align 1 ; <i8>:65 [#uses=1] |
| 655 | sext i8 %65 to i16 ; <i16>:66 [#uses=1] |
| 656 | bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:67 [#uses=1] |
| 657 | call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %67, i16 %64, i16 %66 ) ; <i16>:68 [#uses=1] |
| 658 | icmp eq i16 %68, %64 ; <i1>:69 [#uses=1] |
| 659 | zext i1 %69 to i8 ; <i8>:70 [#uses=1] |
| 660 | zext i8 %70 to i32 ; <i32>:71 [#uses=1] |
| 661 | store i32 %71, i32* @ui, align 4 |
| 662 | load i8* @uc, align 1 ; <i8>:72 [#uses=1] |
| 663 | zext i8 %72 to i32 ; <i32>:73 [#uses=2] |
| 664 | load i8* @sc, align 1 ; <i8>:74 [#uses=1] |
| 665 | sext i8 %74 to i32 ; <i32>:75 [#uses=1] |
| 666 | bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1] |
| 667 | call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %76, i32 %73, i32 %75 ) ; <i32>:77 [#uses=1] |
| 668 | icmp eq i32 %77, %73 ; <i1>:78 [#uses=1] |
| 669 | zext i1 %78 to i8 ; <i8>:79 [#uses=1] |
| 670 | zext i8 %79 to i32 ; <i32>:80 [#uses=1] |
| 671 | store i32 %80, i32* @ui, align 4 |
| 672 | load i8* @uc, align 1 ; <i8>:81 [#uses=1] |
| 673 | zext i8 %81 to i32 ; <i32>:82 [#uses=2] |
| 674 | load i8* @sc, align 1 ; <i8>:83 [#uses=1] |
| 675 | sext i8 %83 to i32 ; <i32>:84 [#uses=1] |
| 676 | bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:85 [#uses=1] |
| 677 | call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %85, i32 %82, i32 %84 ) ; <i32>:86 [#uses=1] |
| 678 | icmp eq i32 %86, %82 ; <i1>:87 [#uses=1] |
| 679 | zext i1 %87 to i8 ; <i8>:88 [#uses=1] |
| 680 | zext i8 %88 to i32 ; <i32>:89 [#uses=1] |
| 681 | store i32 %89, i32* @ui, align 4 |
| 682 | load i8* @uc, align 1 ; <i8>:90 [#uses=1] |
| 683 | zext i8 %90 to i64 ; <i64>:91 [#uses=2] |
| 684 | load i8* @sc, align 1 ; <i8>:92 [#uses=1] |
| 685 | sext i8 %92 to i64 ; <i64>:93 [#uses=1] |
| 686 | bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:94 [#uses=1] |
| 687 | call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %94, i64 %91, i64 %93 ) ; <i64>:95 [#uses=1] |
| 688 | icmp eq i64 %95, %91 ; <i1>:96 [#uses=1] |
| 689 | zext i1 %96 to i8 ; <i8>:97 [#uses=1] |
| 690 | zext i8 %97 to i32 ; <i32>:98 [#uses=1] |
| 691 | store i32 %98, i32* @ui, align 4 |
| 692 | load i8* @uc, align 1 ; <i8>:99 [#uses=1] |
| 693 | zext i8 %99 to i64 ; <i64>:100 [#uses=2] |
| 694 | load i8* @sc, align 1 ; <i8>:101 [#uses=1] |
| 695 | sext i8 %101 to i64 ; <i64>:102 [#uses=1] |
| 696 | bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:103 [#uses=1] |
| 697 | call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %103, i64 %100, i64 %102 ) ; <i64>:104 [#uses=1] |
| 698 | icmp eq i64 %104, %100 ; <i1>:105 [#uses=1] |
| 699 | zext i1 %105 to i8 ; <i8>:106 [#uses=1] |
| 700 | zext i8 %106 to i32 ; <i32>:107 [#uses=1] |
| 701 | store i32 %107, i32* @ui, align 4 |
| 702 | br label %return |
| 703 | |
| 704 | return: ; preds = %entry |
| 705 | ret void |
| 706 | } |
| 707 | |
; Declarations of the legacy compare-and-swap intrinsics used above.
; Signature: (ptr, compare-value, new-value) -> old memory contents.
; These intrinsics were replaced by the cmpxchg instruction in LLVM 3.0.
| 708 | declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind |
| 709 |  |
| 710 | declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind |
| 711 |  |
| 712 | declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind |
| 713 |  |
| 714 | declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind |
| 715 | |
; Exercises the __sync_lock_test_and_set / __sync_lock_release builtin
; patterns at every width:
;   - llvm.atomic.swap.* with the constant 1 (test-and-set), storing the
;     returned old value back into the corresponding global;
;   - a full llvm.memory.barrier followed by volatile stores of 0
;     (lock release) to each global, including @sll/@ull.
| 716 | define void @test_lock() nounwind { |
| 717 | entry: |
; --- test-and-set: atomically swap in 1, keep the previous value ---
| 718 | 	call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 )		; <i8>:0 [#uses=1] |
| 719 | 	store i8 %0, i8* @sc, align 1 |
| 720 | 	call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 )		; <i8>:1 [#uses=1] |
| 721 | 	store i8 %1, i8* @uc, align 1 |
| 722 | 	bitcast i8* bitcast (i16* @ss to i8*) to i16*		; <i16*>:2 [#uses=1] |
| 723 | 	call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 )		; <i16>:3 [#uses=1] |
| 724 | 	store i16 %3, i16* @ss, align 2 |
| 725 | 	bitcast i8* bitcast (i16* @us to i8*) to i16*		; <i16*>:4 [#uses=1] |
| 726 | 	call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 )		; <i16>:5 [#uses=1] |
| 727 | 	store i16 %5, i16* @us, align 2 |
| 728 | 	bitcast i8* bitcast (i32* @si to i8*) to i32*		; <i32*>:6 [#uses=1] |
| 729 | 	call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 )		; <i32>:7 [#uses=1] |
| 730 | 	store i32 %7, i32* @si, align 4 |
| 731 | 	bitcast i8* bitcast (i32* @ui to i8*) to i32*		; <i32*>:8 [#uses=1] |
| 732 | 	call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 )		; <i32>:9 [#uses=1] |
| 733 | 	store i32 %9, i32* @ui, align 4 |
| 734 | 	bitcast i8* bitcast (i64* @sl to i8*) to i64*		; <i64*>:10 [#uses=1] |
| 735 | 	call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 )		; <i64>:11 [#uses=1] |
| 736 | 	store i64 %11, i64* @sl, align 8 |
| 737 | 	bitcast i8* bitcast (i64* @ul to i8*) to i64*		; <i64*>:12 [#uses=1] |
| 738 | 	call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 )		; <i64>:13 [#uses=1] |
| 739 | 	store i64 %13, i64* @ul, align 8 |
; --- lock release: full barrier (ll+ls+sl+ss, non-device), then
; --- volatile zero stores to every global ---
| 740 | 	call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false ) |
| 741 | 	volatile store i8 0, i8* @sc, align 1 |
| 742 | 	volatile store i8 0, i8* @uc, align 1 |
| 743 | 	bitcast i8* bitcast (i16* @ss to i8*) to i16*		; <i16*>:14 [#uses=1] |
| 744 | 	volatile store i16 0, i16* %14, align 2 |
| 745 | 	bitcast i8* bitcast (i16* @us to i8*) to i16*		; <i16*>:15 [#uses=1] |
| 746 | 	volatile store i16 0, i16* %15, align 2 |
| 747 | 	bitcast i8* bitcast (i32* @si to i8*) to i32*		; <i32*>:16 [#uses=1] |
| 748 | 	volatile store i32 0, i32* %16, align 4 |
| 749 | 	bitcast i8* bitcast (i32* @ui to i8*) to i32*		; <i32*>:17 [#uses=1] |
| 750 | 	volatile store i32 0, i32* %17, align 4 |
| 751 | 	bitcast i8* bitcast (i64* @sl to i8*) to i64*		; <i64*>:18 [#uses=1] |
| 752 | 	volatile store i64 0, i64* %18, align 8 |
| 753 | 	bitcast i8* bitcast (i64* @ul to i8*) to i64*		; <i64*>:19 [#uses=1] |
| 754 | 	volatile store i64 0, i64* %19, align 8 |
| 755 | 	bitcast i8* bitcast (i64* @sll to i8*) to i64*		; <i64*>:20 [#uses=1] |
| 756 | 	volatile store i64 0, i64* %20, align 8 |
| 757 | 	bitcast i8* bitcast (i64* @ull to i8*) to i64*		; <i64*>:21 [#uses=1] |
| 758 | 	volatile store i64 0, i64* %21, align 8 |
| 759 | 	br label %return |
| 760 |  |
| 761 | return:		; preds = %entry |
| 762 | 	ret void |
| 763 | } |
| 764 | |
; Declarations of the legacy atomic swap intrinsics (ptr, new-value) ->
; old memory contents, and the five-flag memory barrier intrinsic
; (load-load, load-store, store-load, store-store, device). These were
; superseded by atomicrmw xchg and the fence instruction in LLVM 3.0.
| 765 | declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind |
| 766 |  |
| 767 | declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind |
| 768 |  |
| 769 | declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind |
| 770 |  |
| 771 | declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind |
| 772 |  |
| 773 | declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind |