; rdar://7860110
; RUN: llc < %s | FileCheck %s -check-prefix=X64
; RUN: llc -march=x86 < %s | FileCheck %s -check-prefix=X32
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.2"

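; All of these tests load a wide value, mask out a subfield, merge in a
; zero-extended narrow value, and store the result back. Each one should
; narrow to a single byte or word store.

; test1: replace the low byte of *%a0 with %a1; expected to become a
; single movb.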
define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -256     ; 0xFFFFFF00
  %C = zext i8 %a1 to i32
  %D = or i32 %C, %B
  store i32 %D, i32* %a0, align 4
  ret void

; X64: test1:
; X64: movb %sil, (%rdi)

; X32: test1:
; X32: movb 8(%esp), %al
; X32: movb %al, (%{{.*}})
}

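; test2: replace byte 1 (bits 8-15) of *%a0 with %a1; the shifted insert
; should become a movb to offset 1.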
define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65281   ; 0xFFFF00FF
  %C = zext i8 %a1 to i32
  %CS = shl i32 %C, 8
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test2:
; X64: movb %sil, 1(%rdi)

; X32: test2:
; X32: movb 8(%esp), %al
; X32: movb %al, 1(%{{.*}})
}

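; test3: replace the low 16 bits of *%a0 with %a1; expected to become a
; single movw.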
define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65536   ; 0xFFFF0000
  %C = zext i16 %a1 to i32
  %D = or i32 %B, %C
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test3:
; X64: movw %si, (%rdi)

; X32: test3:
; X32: movw 8(%esp), %ax
; X32: movw %ax, (%{{.*}})
}

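; test4: replace the high 16 bits of *%a0 with %a1; should become a movw
; to offset 2.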
define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, 65535    ; 0x0000FFFF
  %C = zext i16 %a1 to i32
  %CS = shl i32 %C, 16
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test4:
; X64: movw %si, 2(%rdi)

; X32: test4:
; X32: movzwl 8(%esp), %eax
; X32: movw %ax, 2(%{{.*}})
}

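; test5: same replacement as test4, but on bits 16-31 of an i64; the
; wider value should still narrow to a movw at offset 2.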
define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -4294901761 ; 0xFFFFFFFF0000FFFF
  %C = zext i16 %a1 to i64
  %CS = shl i64 %C, 16
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test5:
; X64: movw %si, 2(%rdi)

; X32: test5:
; X32: movzwl 8(%esp), %eax
; X32: movw %ax, 2(%{{.*}})
}

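; test6: replace byte 5 (bits 40-47) of an i64 with %a1; should become a
; movb to offset 5.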
define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test6:
; X64: movb %sil, 5(%rdi)

; X32: test6:
; X32: movb 8(%esp), %al
; X32: movb %al, 5(%{{.*}})
}

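; test7: like test6, but with an extra unrelated load in the same block;
; presumably checking that the narrow store is still formed when other
; memory operations are present.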
define i32 @test7(i64* nocapture %a0, i8 zeroext %a1, i32* %P2) nounwind {
entry:
  %OtherLoad = load i32* %P2
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret i32 %OtherLoad
; X64: test7:
; X64: movb %sil, 5(%rdi)

; X32: test7:
; X32: movb 8(%esp), %cl
; X32: movb %cl, 5(%{{.*}})
}