; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s

; Make sure that flags are properly preserved despite atomic optimizations.

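; The atomic load/add/store sequence in block %L1 gets folded into a single
; add with a memory operand (the addb checked below). That add rewrites
; EFLAGS, so the result of the earlier compare cannot stay live in the flags
; register across it and must be recomputed.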
define i32 @atomic_and_flags(i8* %p, i32 %a, i32 %b) {
; CHECK-LABEL: atomic_and_flags:

; Generate the flags value and use it.
; CHECK: cmpl
; CHECK-NEXT: jne
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %L1, label %L2

L1:
; The following atomic load/add/store pattern gets folded into a single add
; with a memory operand.
; CHECK: addb
  %1 = load atomic i8, i8* %p seq_cst, align 1
  %2 = add i8 %1, 2
  store atomic i8 %2, i8* %p release, align 1

; Use the comparison result again. We need to rematerialize the comparison
; somehow. This test checks that cmpl gets emitted again, but any
; rematerialization would work (the optimizer used to clobber the flags with
; the add).
; CHECK-NEXT: cmpl
; CHECK-NEXT: jne
  br i1 %cmp, label %L3, label %L4

L2:
  ret i32 2

L3:
  ret i32 3

L4:
  ret i32 4
}