; RUN: llc < %s -mtriple=arm64-unknown | FileCheck %s
; i8* p; // p is 1 byte aligned
; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
; Four adjacent unaligned byte loads OR'd in little-endian order fold to one ldr.
define i32 @load_i32_by_i8_unaligned(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_unaligned:
; CHECK: ldr w0, [x0]
; CHECK-NEXT: ret
  %p = bitcast i32* %arg to i8*
  %p0 = getelementptr inbounds i8, i8* %p, i32 0
  %b0 = load i8, i8* %p, align 1
  %z0 = zext i8 %b0 to i32
  %p1 = getelementptr inbounds i8, i8* %p, i32 1
  %b1 = load i8, i8* %p1, align 1
  %z1 = zext i8 %b1 to i32
  %s1 = shl nuw nsw i32 %z1, 8
  %o1 = or i32 %s1, %z0
  %p2 = getelementptr inbounds i8, i8* %p, i32 2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 16
  %o2 = or i32 %o1, %s2
  %p3 = getelementptr inbounds i8, i8* %p, i32 3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw nsw i32 %z3, 24
  %o3 = or i32 %o2, %s3
  ret i32 %o3
}

; i8* p; // p is 4 byte aligned
; (i32) p[0] | ((i32) p[1] << 8) | ((i32) p[2] << 16) | ((i32) p[3] << 24)
; Same pattern with a 4-byte-aligned base; still folds to a single ldr.
define i32 @load_i32_by_i8_aligned(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_aligned:
; CHECK: ldr w0, [x0]
; CHECK-NEXT: ret
  %p = bitcast i32* %arg to i8*
  %p0 = getelementptr inbounds i8, i8* %p, i32 0
  %b0 = load i8, i8* %p, align 4
  %z0 = zext i8 %b0 to i32
  %p1 = getelementptr inbounds i8, i8* %p, i32 1
  %b1 = load i8, i8* %p1, align 1
  %z1 = zext i8 %b1 to i32
  %s1 = shl nuw nsw i32 %z1, 8
  %o1 = or i32 %s1, %z0
  %p2 = getelementptr inbounds i8, i8* %p, i32 2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 16
  %o2 = or i32 %o1, %s2
  %p3 = getelementptr inbounds i8, i8* %p, i32 3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw nsw i32 %z3, 24
  %o3 = or i32 %o2, %s3
  ret i32 %o3
}

; i8* p; // p is 4 byte aligned
; ((i32) p[0] << 24) | ((i32) p[1] << 16) | ((i32) p[2] << 8) | (i32) p[3]
; Big-endian byte order on a little-endian target folds to ldr + rev.
define i32 @load_i32_by_i8_bswap(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_bswap:
; CHECK: ldr w8, [x0]
; CHECK-NEXT: rev w0, w8
; CHECK-NEXT: ret
  %p = bitcast i32* %arg to i8*
  %b0 = load i8, i8* %p, align 4
  %z0 = zext i8 %b0 to i32
  %s0 = shl nuw nsw i32 %z0, 24
  %p1 = getelementptr inbounds i8, i8* %p, i32 1
  %b1 = load i8, i8* %p1, align 1
  %z1 = zext i8 %b1 to i32
  %s1 = shl nuw nsw i32 %z1, 16
  %o1 = or i32 %s1, %s0
  %p2 = getelementptr inbounds i8, i8* %p, i32 2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 8
  %o2 = or i32 %o1, %s2
  %p3 = getelementptr inbounds i8, i8* %p, i32 3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i32
  %o3 = or i32 %o2, %z3
  ret i32 %o3
}

; i8* p; // p is 8 byte aligned
; (i64) p[0] | ((i64) p[1] << 8) | ((i64) p[2] << 16) | ((i64) p[3] << 24) | ((i64) p[4] << 32) | ((i64) p[5] << 40) | ((i64) p[6] << 48) | ((i64) p[7] << 56)
; Eight little-endian byte loads fold to a single 64-bit ldr.
define i64 @load_i64_by_i8(i64* %arg) {
; CHECK-LABEL: load_i64_by_i8:
; CHECK: ldr x0, [x0]
; CHECK-NEXT: ret
  %p = bitcast i64* %arg to i8*
  %b0 = load i8, i8* %p, align 8
  %z0 = zext i8 %b0 to i64
  %p1 = getelementptr inbounds i8, i8* %p, i64 1
  %b1 = load i8, i8* %p1, align 1
  %z1 = zext i8 %b1 to i64
  %s1 = shl nuw nsw i64 %z1, 8
  %o1 = or i64 %s1, %z0
  %p2 = getelementptr inbounds i8, i8* %p, i64 2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i64
  %s2 = shl nuw nsw i64 %z2, 16
  %o2 = or i64 %o1, %s2
  %p3 = getelementptr inbounds i8, i8* %p, i64 3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i64
  %s3 = shl nuw nsw i64 %z3, 24
  %o3 = or i64 %o2, %s3
  %p4 = getelementptr inbounds i8, i8* %p, i64 4
  %b4 = load i8, i8* %p4, align 1
  %z4 = zext i8 %b4 to i64
  %s4 = shl nuw nsw i64 %z4, 32
  %o4 = or i64 %o3, %s4
  %p5 = getelementptr inbounds i8, i8* %p, i64 5
  %b5 = load i8, i8* %p5, align 1
  %z5 = zext i8 %b5 to i64
  %s5 = shl nuw nsw i64 %z5, 40
  %o5 = or i64 %o4, %s5
  %p6 = getelementptr inbounds i8, i8* %p, i64 6
  %b6 = load i8, i8* %p6, align 1
  %z6 = zext i8 %b6 to i64
  %s6 = shl nuw nsw i64 %z6, 48
  %o6 = or i64 %o5, %s6
  %p7 = getelementptr inbounds i8, i8* %p, i64 7
  %b7 = load i8, i8* %p7, align 1
  %z7 = zext i8 %b7 to i64
  %s7 = shl nuw i64 %z7, 56
  %o7 = or i64 %o6, %s7
  ret i64 %o7
}

; i8* p; // p is 8 byte aligned
; ((i64) p[0] << 56) | ((i64) p[1] << 48) | ((i64) p[2] << 40) | ((i64) p[3] << 32) | ((i64) p[4] << 24) | ((i64) p[5] << 16) | ((i64) p[6] << 8) | (i64) p[7]
; Big-endian byte order folds to a 64-bit ldr followed by rev.
define i64 @load_i64_by_i8_bswap(i64* %arg) {
; CHECK-LABEL: load_i64_by_i8_bswap:
; CHECK: ldr x8, [x0]
; CHECK-NEXT: rev x0, x8
; CHECK-NEXT: ret
  %p = bitcast i64* %arg to i8*
  %b0 = load i8, i8* %p, align 8
  %z0 = zext i8 %b0 to i64
  %s0 = shl nuw i64 %z0, 56
  %p1 = getelementptr inbounds i8, i8* %p, i64 1
  %b1 = load i8, i8* %p1, align 1
  %z1 = zext i8 %b1 to i64
  %s1 = shl nuw nsw i64 %z1, 48
  %o1 = or i64 %s1, %s0
  %p2 = getelementptr inbounds i8, i8* %p, i64 2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i64
  %s2 = shl nuw nsw i64 %z2, 40
  %o2 = or i64 %o1, %s2
  %p3 = getelementptr inbounds i8, i8* %p, i64 3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i64
  %s3 = shl nuw nsw i64 %z3, 32
  %o3 = or i64 %o2, %s3
  %p4 = getelementptr inbounds i8, i8* %p, i64 4
  %b4 = load i8, i8* %p4, align 1
  %z4 = zext i8 %b4 to i64
  %s4 = shl nuw nsw i64 %z4, 24
  %o4 = or i64 %o3, %s4
  %p5 = getelementptr inbounds i8, i8* %p, i64 5
  %b5 = load i8, i8* %p5, align 1
  %z5 = zext i8 %b5 to i64
  %s5 = shl nuw nsw i64 %z5, 16
  %o5 = or i64 %o4, %s5
  %p6 = getelementptr inbounds i8, i8* %p, i64 6
  %b6 = load i8, i8* %p6, align 1
  %z6 = zext i8 %b6 to i64
  %s6 = shl nuw nsw i64 %z6, 8
  %o6 = or i64 %o5, %s6
  %p7 = getelementptr inbounds i8, i8* %p, i64 7
  %b7 = load i8, i8* %p7, align 1
  %z7 = zext i8 %b7 to i64
  %o7 = or i64 %o6, %z7
  ret i64 %o7
}

; i8* p; // p[1] is 4 byte aligned
; (i32) p[1] | ((i32) p[2] << 8) | ((i32) p[3] << 16) | ((i32) p[4] << 24)
; Bytes start at offset 1; per the checks the combine does not fire here and
; the bytes are loaded individually and merged with bfi.
define i32 @load_i32_by_i8_nonzero_offset(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_nonzero_offset:
; CHECK: ldrb w8, [x0, #1]
; CHECK-NEXT: ldrb w9, [x0, #2]
; CHECK-NEXT: ldrb w10, [x0, #3]
; CHECK-NEXT: ldrb w11, [x0, #4]
; CHECK-NEXT: bfi w8, w9, #8, #8
; CHECK-NEXT: bfi w8, w10, #16, #8
; CHECK-NEXT: bfi w8, w11, #24, #8
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
  %p = bitcast i32* %arg to i8*
  %p1 = getelementptr inbounds i8, i8* %p, i32 1
  %b1 = load i8, i8* %p1, align 4
  %z1 = zext i8 %b1 to i32
  %p2 = getelementptr inbounds i8, i8* %p, i32 2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 8
  %o2 = or i32 %s2, %z1
  %p3 = getelementptr inbounds i8, i8* %p, i32 3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw nsw i32 %z3, 16
  %o3 = or i32 %o2, %s3
  %p4 = getelementptr inbounds i8, i8* %p, i32 4
  %b4 = load i8, i8* %p4, align 1
  %z4 = zext i8 %b4 to i32
  %s4 = shl nuw nsw i32 %z4, 24
  %o4 = or i32 %o3, %s4
  ret i32 %o4
}

; i8* p; // p[-4] is 4 byte aligned
; (i32) p[-4] | ((i32) p[-3] << 8) | ((i32) p[-2] << 16) | ((i32) p[-1] << 24)
; Negative offsets; per the checks the bytes are loaded individually (ldurb)
; and merged with bfi.
define i32 @load_i32_by_i8_neg_offset(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_neg_offset:
; CHECK: ldurb w8, [x0, #-4]
; CHECK-NEXT: ldurb w9, [x0, #-3]
; CHECK-NEXT: ldurb w10, [x0, #-2]
; CHECK-NEXT: ldurb w11, [x0, #-1]
; CHECK-NEXT: bfi w8, w9, #8, #8
; CHECK-NEXT: bfi w8, w10, #16, #8
; CHECK-NEXT: bfi w8, w11, #24, #8
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
  %p = bitcast i32* %arg to i8*
  %pm4 = getelementptr inbounds i8, i8* %p, i32 -4
  %b0 = load i8, i8* %pm4, align 4
  %z0 = zext i8 %b0 to i32
  %pm3 = getelementptr inbounds i8, i8* %p, i32 -3
  %b1 = load i8, i8* %pm3, align 1
  %z1 = zext i8 %b1 to i32
  %s1 = shl nuw nsw i32 %z1, 8
  %o1 = or i32 %s1, %z0
  %pm2 = getelementptr inbounds i8, i8* %p, i32 -2
  %b2 = load i8, i8* %pm2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 16
  %o2 = or i32 %o1, %s2
  %pm1 = getelementptr inbounds i8, i8* %p, i32 -1
  %b3 = load i8, i8* %pm1, align 1
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw nsw i32 %z3, 24
  %o3 = or i32 %o2, %s3
  ret i32 %o3
}

; i8* p; // p[1] is 4 byte aligned
; (i32) p[4] | ((i32) p[3] << 8) | ((i32) p[2] << 16) | ((i32) p[1] << 24)
; Byte-swapped order at a nonzero offset; per the checks the combine does not
; fire and the bytes are loaded individually and merged with bfi.
define i32 @load_i32_by_i8_nonzero_offset_bswap(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_nonzero_offset_bswap:
; CHECK: ldrb w8, [x0, #4]
; CHECK-NEXT: ldrb w9, [x0, #3]
; CHECK-NEXT: ldrb w10, [x0, #2]
; CHECK-NEXT: ldrb w11, [x0, #1]
; CHECK-NEXT: bfi w8, w9, #8, #8
; CHECK-NEXT: bfi w8, w10, #16, #8
; CHECK-NEXT: bfi w8, w11, #24, #8
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
  %p = bitcast i32* %arg to i8*
  %p4 = getelementptr inbounds i8, i8* %p, i32 4
  %b0 = load i8, i8* %p4, align 1
  %z0 = zext i8 %b0 to i32
  %p3 = getelementptr inbounds i8, i8* %p, i32 3
  %b1 = load i8, i8* %p3, align 1
  %z1 = zext i8 %b1 to i32
  %s1 = shl nuw nsw i32 %z1, 8
  %o1 = or i32 %s1, %z0
  %p2 = getelementptr inbounds i8, i8* %p, i32 2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 16
  %o2 = or i32 %o1, %s2
  %p1 = getelementptr inbounds i8, i8* %p, i32 1
  %b3 = load i8, i8* %p1, align 4
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw nsw i32 %z3, 24
  %o3 = or i32 %o2, %s3
  ret i32 %o3
}

; i8* p; // p[-4] is 4 byte aligned
; (i32) p[-1] | ((i32) p[-2] << 8) | ((i32) p[-3] << 16) | ((i32) p[-4] << 24)
; Byte-swapped order at negative offsets; per the checks the bytes are loaded
; individually (ldurb) and merged with bfi.
define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
; CHECK-LABEL: load_i32_by_i8_neg_offset_bswap:
; CHECK: ldurb w8, [x0, #-1]
; CHECK-NEXT: ldurb w9, [x0, #-2]
; CHECK-NEXT: ldurb w10, [x0, #-3]
; CHECK-NEXT: ldurb w11, [x0, #-4]
; CHECK-NEXT: bfi w8, w9, #8, #8
; CHECK-NEXT: bfi w8, w10, #16, #8
; CHECK-NEXT: bfi w8, w11, #24, #8
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
  %p = bitcast i32* %arg to i8*
  %pm1 = getelementptr inbounds i8, i8* %p, i32 -1
  %b0 = load i8, i8* %pm1, align 1
  %z0 = zext i8 %b0 to i32
  %pm2 = getelementptr inbounds i8, i8* %p, i32 -2
  %b1 = load i8, i8* %pm2, align 1
  %z1 = zext i8 %b1 to i32
  %s1 = shl nuw nsw i32 %z1, 8
  %o1 = or i32 %s1, %z0
  %pm3 = getelementptr inbounds i8, i8* %p, i32 -3
  %b2 = load i8, i8* %pm3, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 16
  %o2 = or i32 %o1, %s2
  %pm4 = getelementptr inbounds i8, i8* %p, i32 -4
  %b3 = load i8, i8* %pm4, align 4
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw nsw i32 %z3, 24
  %o3 = or i32 %o2, %s3
  ret i32 %o3
}

declare i16 @llvm.bswap.i16(i16)

; i16* p; // p is 4 byte aligned
; (i32) bswap(p[1]) | ((i32) bswap(p[0]) << 16)
; Two byte-swapped halfword loads combine into ldr + rev.
define i32 @load_i32_by_bswap_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_bswap_i16:
; CHECK: ldr w8, [x0]
; CHECK-NEXT: rev w0, w8
; CHECK-NEXT: ret

  %p = bitcast i32* %arg to i16*
  %h0 = load i16, i16* %p, align 4
  %r0 = call i16 @llvm.bswap.i16(i16 %h0)
  %z0 = zext i16 %r0 to i32
  %p1 = getelementptr inbounds i16, i16* %p, i32 1
  %h1 = load i16, i16* %p1, align 1
  %r1 = call i16 @llvm.bswap.i16(i16 %h1)
  %z1 = zext i16 %r1 to i32
  %hi = shl nuw nsw i32 %z0, 16
  %res = or i32 %hi, %z1
  ret i32 %res
}

; i16* p; // p is 4 byte aligned
; (i32) p[0] | ((i32) sext(p[1]) << 16)
; The sign-extended high half blocks the load combine; per the checks the two
; halfwords are loaded separately and merged with bfi.
define i32 @load_i32_by_sext_i16(i32* %arg) {
; CHECK-LABEL: load_i32_by_sext_i16:
; CHECK: ldrh w8, [x0]
; CHECK-NEXT: ldrh w9, [x0, #2]
; CHECK-NEXT: bfi w8, w9, #16, #16
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret

  %p = bitcast i32* %arg to i16*
  %h0 = load i16, i16* %p, align 4
  %z0 = zext i16 %h0 to i32
  %p1 = getelementptr inbounds i16, i16* %p, i32 1
  %h1 = load i16, i16* %p1, align 1
  %x1 = sext i16 %h1 to i32
  %hi = shl nuw nsw i32 %x1, 16
  %res = or i32 %hi, %z0
  ret i32 %res
}

; i8* arg; i32 i;
; p = arg + 12;
; (i32) p[i] | ((i32) p[i + 1] << 8) | ((i32) p[i + 2] << 16) | ((i32) p[i + 3] << 24)
; Indexed byte loads off a common base+12 combine into a single offset ldr.
define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
; CHECK-LABEL: load_i32_by_i8_base_offset_index:
; CHECK: add x8, x0, w1, uxtw
; CHECK-NEXT: ldr w0, [x8, #12]
; CHECK-NEXT: ret
  %i3 = add nuw nsw i32 %i, 3
  %i2 = add nuw nsw i32 %i, 2
  %i1 = add nuw nsw i32 %i, 1
  %base = getelementptr inbounds i8, i8* %arg, i64 12
  %idx0 = zext i32 %i to i64
  %p0 = getelementptr inbounds i8, i8* %base, i64 %idx0
  %b0 = load i8, i8* %p0, align 4
  %z0 = zext i8 %b0 to i32
  %idx1 = zext i32 %i1 to i64
  %p1 = getelementptr inbounds i8, i8* %base, i64 %idx1
  %b1 = load i8, i8* %p1, align 1
  %z1 = zext i8 %b1 to i32
  %s1 = shl nuw nsw i32 %z1, 8
  %o1 = or i32 %s1, %z0
  %idx2 = zext i32 %i2 to i64
  %p2 = getelementptr inbounds i8, i8* %base, i64 %idx2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 16
  %o2 = or i32 %o1, %s2
  %idx3 = zext i32 %i3 to i64
  %p3 = getelementptr inbounds i8, i8* %base, i64 %idx3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw i32 %z3, 24
  %o3 = or i32 %o2, %s3
  ret i32 %o3
}

; i8* arg; i32 i;
; p = arg + 12;
; (i32) p[i + 1] | ((i32) p[i + 2] << 8) | ((i32) p[i + 3] << 16) | ((i32) p[i + 4] << 24)
; The extra +1 in every index defeats the combine; per the checks the bytes
; are loaded individually and merged with bfi.
define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
; CHECK: add x8, x0, w1, uxtw
; CHECK-NEXT: ldrb w0, [x8, #13]
; CHECK-NEXT: ldrb w9, [x8, #14]
; CHECK-NEXT: ldrb w10, [x8, #15]
; CHECK-NEXT: ldrb w8, [x8, #16]
; CHECK-NEXT: bfi w0, w9, #8, #8
; CHECK-NEXT: bfi w0, w10, #16, #8
; CHECK-NEXT: bfi w0, w8, #24, #8
; CHECK-NEXT: ret

  %i4 = add nuw nsw i32 %i, 4
  %i3 = add nuw nsw i32 %i, 3
  %i2 = add nuw nsw i32 %i, 2
  %base = getelementptr inbounds i8, i8* %arg, i64 12
  %i1 = add nuw nsw i32 %i, 1
  %idx1 = zext i32 %i1 to i64
  %p1 = getelementptr inbounds i8, i8* %base, i64 %idx1
  %b1 = load i8, i8* %p1, align 4
  %z1 = zext i8 %b1 to i32
  %idx2 = zext i32 %i2 to i64
  %p2 = getelementptr inbounds i8, i8* %base, i64 %idx2
  %b2 = load i8, i8* %p2, align 1
  %z2 = zext i8 %b2 to i32
  %s2 = shl nuw nsw i32 %z2, 8
  %o2 = or i32 %s2, %z1
  %idx3 = zext i32 %i3 to i64
  %p3 = getelementptr inbounds i8, i8* %base, i64 %idx3
  %b3 = load i8, i8* %p3, align 1
  %z3 = zext i8 %b3 to i32
  %s3 = shl nuw nsw i32 %z3, 16
  %o3 = or i32 %o2, %s3
  %idx4 = zext i32 %i4 to i64
  %p4 = getelementptr inbounds i8, i8* %base, i64 %idx4
  %b4 = load i8, i8* %p4, align 1
  %z4 = zext i8 %b4 to i32
  %s4 = shl nuw i32 %z4, 24
  %o4 = or i32 %o3, %s4
  ret i32 %o4
}