; RUN: llc < %s -mtriple=powerpc-apple-darwin -march=ppc32 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32
; FIXME: -verify-machineinstrs currently fails on ppc64 (mismatched register/instruction).
; This is already checked for in Atomics-64.ll
; RUN: llc < %s -mtriple=powerpc-apple-darwin -march=ppc64 | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64

; FIXME: we don't currently check for the operations themselves with CHECK-NEXT,
; because they are implemented in a very messy way with lwarx/stwcx.
; It should be fixed soon in another patch.

; We first check loads, for all sizes from i8 to i64.
; We also vary orderings to check for barriers.
; An unordered atomic load needs no barrier: plain lbz, no sync.
define i8 @load_i8_unordered(i8* %mem) {
; CHECK-LABEL: load_i8_unordered
; CHECK: lbz
; CHECK-NOT: sync
  %val = load atomic i8, i8* %mem unordered, align 1
  ret i8 %val
}
; A monotonic atomic load needs no barrier: plain lhz, no sync.
define i16 @load_i16_monotonic(i16* %mem) {
; CHECK-LABEL: load_i16_monotonic
; CHECK: lhz
; CHECK-NOT: sync
  %val = load atomic i16, i16* %mem monotonic, align 2
  ret i16 %val
}
; An acquire load is followed by a barrier (lwsync prints as "sync 1").
define i32 @load_i32_acquire(i32* %mem) {
; CHECK-LABEL: load_i32_acquire
; CHECK: lwz
  %val = load atomic i32, i32* %mem acquire, align 4
; CHECK: sync 1
  ret i32 %val
}
; A seq_cst load is bracketed by barriers: heavyweight "sync 0" before,
; "sync 1" after. i64 is a libcall (__sync_*) on ppc32, a native ld on ppc64.
define i64 @load_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: load_i64_seq_cst
; CHECK: sync 0
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: ld
  %val = load atomic i64, i64* %mem seq_cst, align 8
; CHECK: sync 1
  ret i64 %val
}

; Stores
; An unordered atomic store needs no barrier: plain stb, no sync.
define void @store_i8_unordered(i8* %mem) {
; CHECK-LABEL: store_i8_unordered
; CHECK-NOT: sync
; CHECK: stb
  store atomic i8 42, i8* %mem unordered, align 1
  ret void
}
; A monotonic atomic store needs no barrier: plain sth, no sync.
define void @store_i16_monotonic(i16* %mem) {
; CHECK-LABEL: store_i16_monotonic
; CHECK-NOT: sync
; CHECK: sth
  store atomic i16 42, i16* %mem monotonic, align 2
  ret void
}
; A release store is preceded by a barrier (lwsync prints as "sync 1").
define void @store_i32_release(i32* %mem) {
; CHECK-LABEL: store_i32_release
; CHECK: sync 1
; CHECK: stw
  store atomic i32 42, i32* %mem release, align 4
  ret void
}
; A seq_cst store is preceded by a full "sync 0".
; i64 is a libcall (__sync_*) on ppc32, a native std on ppc64.
define void @store_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: store_i64_seq_cst
; CHECK: sync 0
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: std
  store atomic i64 42, i64* %mem seq_cst, align 8
  ret void
}

; Atomic CmpXchg
; seq_cst cmpxchg: "sync 0" before the operation, "sync 1" after it.
define i8 @cas_strong_i8_sc_sc(i8* %mem) {
; CHECK-LABEL: cas_strong_i8_sc_sc
; CHECK: sync 0
  %val = cmpxchg i8* %mem, i8 0, i8 1 seq_cst seq_cst
; CHECK: sync 1
  %loaded = extractvalue { i8, i1} %val, 0
  ret i8 %loaded
}
; acquire cmpxchg: no barrier before the operation, "sync 1" after it.
define i16 @cas_weak_i16_acquire_acquire(i16* %mem) {
; CHECK-LABEL: cas_weak_i16_acquire_acquire
; CHECK-NOT: sync
  %val = cmpxchg weak i16* %mem, i16 0, i16 1 acquire acquire
; CHECK: sync 1
  %loaded = extractvalue { i16, i1} %val, 0
  ret i16 %loaded
}
; acq_rel cmpxchg: lwsync ("sync 1") both before and after the operation.
define i32 @cas_strong_i32_acqrel_acquire(i32* %mem) {
; CHECK-LABEL: cas_strong_i32_acqrel_acquire
; CHECK: sync 1
  %val = cmpxchg i32* %mem, i32 0, i32 1 acq_rel acquire
; CHECK: sync 1
  %loaded = extractvalue { i32, i1} %val, 0
  ret i32 %loaded
}
; release cmpxchg: "sync 1" before the operation, no trailing barrier.
; The CHECK-NOT uses the regex [sync ] so it cannot match "stwcx."-style text.
define i64 @cas_weak_i64_release_monotonic(i64* %mem) {
; CHECK-LABEL: cas_weak_i64_release_monotonic
; CHECK: sync 1
  %val = cmpxchg weak i64* %mem, i64 0, i64 1 release monotonic
; CHECK-NOT: [sync ]
  %loaded = extractvalue { i64, i1} %val, 0
  ret i64 %loaded
}

; AtomicRMW
; monotonic atomicrmw: no barriers at all.
define i8 @add_i8_monotonic(i8* %mem, i8 %operand) {
; CHECK-LABEL: add_i8_monotonic
; CHECK-NOT: sync
  %val = atomicrmw add i8* %mem, i8 %operand monotonic
  ret i8 %val
}
; seq_cst atomicrmw: "sync 0" before the operation, "sync 1" after it.
define i16 @xor_i16_seq_cst(i16* %mem, i16 %operand) {
; CHECK-LABEL: xor_i16_seq_cst
; CHECK: sync 0
  %val = atomicrmw xor i16* %mem, i16 %operand seq_cst
; CHECK: sync 1
  ret i16 %val
}
; acq_rel atomicrmw: lwsync ("sync 1") both before and after the operation.
define i32 @xchg_i32_acq_rel(i32* %mem, i32 %operand) {
; CHECK-LABEL: xchg_i32_acq_rel
; CHECK: sync 1
  %val = atomicrmw xchg i32* %mem, i32 %operand acq_rel
; CHECK: sync 1
  ret i32 %val
}
; release atomicrmw: "sync 1" before the operation, no trailing barrier.
; The CHECK-NOT uses the regex [sync ] so it cannot match "stwcx."-style text.
define i64 @and_i64_release(i64* %mem, i64 %operand) {
; CHECK-LABEL: and_i64_release
; CHECK: sync 1
  %val = atomicrmw and i64* %mem, i64 %operand release
; CHECK-NOT: [sync ]
  ret i64 %val
}