; XFAIL: *
; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=SI -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI %s
; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-promote-alloca < %s | FileCheck -check-prefix=SI %s

; 64-bit select was originally lowered with a build_pair, and this
; could be simplified to 1 cndmask instead of 2, but that broke when
; it started being implemented with a v2i32 build_vector and
; bitcasting.
; Select between two i64 values, then truncate the result to i32 and
; store it. Exercises the lowering of a 64-bit select whose only use is
; a truncation (see the comment block above this function).
define void @trunc_select_i64(i32 addrspace(1)* %out, i64 %a, i64 %b, i32 %c) {
  %cmp = icmp eq i32 %c, 0
  %select = select i1 %cmp, i64 %a, i64 %b
  %trunc = trunc i64 %select to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 4
  ret void
}

; FIXME: Fix truncating store for local memory
; SI-LABEL: {{^}}trunc_load_alloca_i64:
; SI: v_movrels_b32
; SI-NOT: v_movrels_b32
; SI: s_endpgm
; Dynamically index into a 4-element i64 alloca, mask/adjust the loaded
; value, and store the i64 result to global memory. The variable index
; (%idx) forces indirect addressing (v_movrels) of the private array;
; the CHECK lines above verify only one movrels pair is emitted.
define void @trunc_load_alloca_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) {
  %idx = add i32 %a, %b
  %alloca = alloca i64, i32 4
  %gep0 = getelementptr i64, i64* %alloca, i64 0
  %gep1 = getelementptr i64, i64* %alloca, i64 1
  %gep2 = getelementptr i64, i64* %alloca, i64 2
  %gep3 = getelementptr i64, i64* %alloca, i64 3
  store i64 24, i64* %gep0, align 8
  store i64 9334, i64* %gep1, align 8
  store i64 3935, i64* %gep2, align 8
  store i64 9342, i64* %gep3, align 8
  %gep = getelementptr i64, i64* %alloca, i32 %idx
  %load = load i64, i64* %gep, align 8
  %mask = and i64 %load, 4294967296
  %add = add i64 %mask, -1
  store i64 %add, i64 addrspace(1)* %out, align 4
  ret void
}
39}