; rdar://7860110
; RUN: llc < %s | FileCheck %s -check-prefix=X64
; RUN: llc -march=x86 < %s | FileCheck %s -check-prefix=X32
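;
; These tests check that a wide load / mask / or / store sequence that
; replaces only a narrow subfield of a value in memory is narrowed into a
; single byte or word store. As an illustrative C equivalent, test1 amounts
; to *a0 = (*a0 & ~0xFF) | a1, which should compile to one movb rather than
; a load-modify-store of the full i32.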
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.2"
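; test1: store into the low byte of an i32 in memory; expect a single movb.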
define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -256     ; 0xFFFFFF00
  %C = zext i8 %a1 to i32
  %D = or i32 %C, %B
  store i32 %D, i32* %a0, align 4
  ret void

; X64: test1:
; X64: movb %sil, (%rdi)

; X32: test1:
; X32: movb 8(%esp), %al
; X32: movb %al, (%{{.*}})
}

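; test2: store into byte 1 of an i32 in memory; expect a single movb at offset 1.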
define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65281    ; 0xFFFF00FF
  %C = zext i8 %a1 to i32
  %CS = shl i32 %C, 8
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test2:
; X64: movb %sil, 1(%rdi)

; X32: test2:
; X32: movb 8(%esp), %al
; X32: movb %al, 1(%{{.*}})
}

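; test3: store into the low 16 bits of an i32 in memory; expect a single movw.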
define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65536    ; 0xFFFF0000
  %C = zext i16 %a1 to i32
  %D = or i32 %B, %C
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test3:
; X64: movw %si, (%rdi)

; X32: test3:
; X32: movw 8(%esp), %ax
; X32: movw %ax, (%{{.*}})
}

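; test4: store into the high 16 bits of an i32 in memory; expect a single movw at offset 2.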
define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, 65535    ; 0x0000FFFF
  %C = zext i16 %a1 to i32
  %CS = shl i32 %C, 16
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test4:
; X64: movw %si, 2(%rdi)

; X32: test4:
; X32: movzwl 8(%esp), %eax
; X32: movw %ax, 2(%{{.*}})
}

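; test5: store into bits 16-31 of an i64 in memory; expect a single movw at offset 2.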
define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -4294901761    ; 0xFFFFFFFF0000FFFF
  %C = zext i16 %a1 to i64
  %CS = shl i64 %C, 16
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test5:
; X64: movw %si, 2(%rdi)

; X32: test5:
; X32: movzwl 8(%esp), %eax
; X32: movw %ax, 2(%{{.*}})
}

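; test6: store into bits 40-47 of an i64 in memory; expect a single movb at offset 5.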
define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881    ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test6:
; X64: movb %sil, 5(%rdi)

; X32: test6:
; X32: movb 8(%esp), %al
; X32: movb %al, 5(%{{.*}})
}

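; test7: like test6, but with an additional unrelated load in the function;
; the narrow store should still be formed.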
define i32 @test7(i64* nocapture %a0, i8 zeroext %a1, i32* %P2) nounwind {
entry:
  %OtherLoad = load i32* %P2
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881    ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret i32 %OtherLoad
; X64: test7:
; X64: movb %sil, 5(%rdi)

; X32: test7:
; X32: movb 8(%esp), %cl
; X32: movb %cl, 5(%{{.*}})
}