; RUN: llc -mtriple=armv7s-apple-ios7.0 -show-mc-encoding %s -o - | FileCheck %s --check-prefix=CHECK-ARM
; RUN: llc -mtriple=thumbv7s-apple-ios7.0 -show-mc-encoding %s -o - | FileCheck %s --check-prefix=CHECK-THUMB
; RUN: llc -mtriple=thumbv7m-none-eabi -show-mc-encoding %s -o - | FileCheck %s --check-prefix=CHECK-THUMB
| 4 | |
; In the ARM backend, most compares are glued to their uses so CPSR can't
; escape. However, for long ADCS chains (and last ditch fallback) the dependency
; is carried in the DAG because duplicating them can be more expensive than
; copying CPSR.
| 9 | |
; Crafting a test for this was a little tricky, in case it breaks here are some
; notes on what I was trying to achieve:
;   + We want 2 long ADCS chains
;   + We want them to split after an initial common prefix (so that a single
;     CPSR is used twice).
;   + We want both chains to write CPSR post-split (so that the copy can't be
;     elided).
;   + We want the chains to be long enough that duplicating them is expensive.
| 18 | |
| 19 | define void @test_copy_cpsr(i128 %lhs, i128 %rhs, i128* %addr) { |
| 20 | ; CHECK-ARM: test_copy_cpsr: |
| 21 | ; CHECK-THUMB: test_copy_cpsr: |
| 22 | |
| 23 | ; CHECK-ARM: mrs [[TMP:r[0-9]+]], apsr @ encoding: [0x00,0x{{[0-9a-f]}}0,0x0f,0xe1] |
| 24 | ; CHECK-ARM: msr APSR_nzcvq, [[TMP]] @ encoding: [0x0{{[0-9a-f]}},0xf0,0x28,0xe1] |
| 25 | |
| 26 | ; In Thumb mode v7M and v7AR have different MRS/MSR instructions that happen |
| 27 | ; to overlap for the apsr case, so it's definitely worth checking both. |
| 28 | ; CHECK-THUMB: mrs [[TMP:r[0-9]+]], apsr @ encoding: [0xef,0xf3,0x00,0x8{{[0-9a-f]}}] |
| 29 | ; CHECK-THUMB: msr {{APSR|apsr}}_nzcvq, [[TMP]] @ encoding: [0x8{{[0-9a-f]}},0xf3,0x00,0x88] |
| 30 | |
| 31 | %sum = add i128 %lhs, %rhs |
| 32 | store volatile i128 %sum, i128* %addr |
| 33 | |
| 34 | %rhs2.tmp1 = trunc i128 %rhs to i64 |
| 35 | %rhs2 = zext i64 %rhs2.tmp1 to i128 |
| 36 | |
| 37 | %sum2 = add i128 %lhs, %rhs2 |
| 38 | store volatile i128 %sum2, i128* %addr |
| 39 | |
| 40 | ret void |
| 41 | } |