; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s

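; Truncating the i64 argument for a plain i32 store should only need the low
; dword of the value: the SI checks below expect an S_LOAD_DWORD of just the
; low half of the argument, moved into a VGPR and written back with
; BUFFER_STORE_DWORD, while the EG checks expect a single 32-bit
; MEM_RAT_CACHELESS store.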
define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
; SI-LABEL: {{^}}trunc_i64_to_i32_store:
; SI: S_LOAD_DWORD [[SLOAD:s[0-9]+]], s[0:1], 0xb
; SI: V_MOV_B32_e32 [[VLOAD:v[0-9]+]], [[SLOAD]]
; SI: BUFFER_STORE_DWORD [[VLOAD]]

; EG-LABEL: {{^}}trunc_i64_to_i32_store:
; EG: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG: LSHR
; EG-NEXT: 2(

  %result = trunc i64 %in to i32
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

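; Only the low 32 bits of the shifted value are stored, so the checks below
; expect the i64 shift-by-2 to be performed as a 32-bit S_LSHL_B32 on the low
; half of the loaded argument before the store.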
; SI-LABEL: {{^}}trunc_load_shl_i64:
; SI-DAG: S_LOAD_DWORDX2
; SI-DAG: S_LOAD_DWORD [[SREG:s[0-9]+]],
; SI: S_LSHL_B32 [[SHL:s[0-9]+]], [[SREG]], 2
; SI: V_MOV_B32_e32 [[VSHL:v[0-9]+]], [[SHL]]
; SI: BUFFER_STORE_DWORD [[VSHL]],
define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
  %b = shl i64 %a, 2
  %result = trunc i64 %b to i32
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

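; Here the shifted value is also stored as a full i64 (and the add keeps the
; i32 store from being shrunk), so the 64-bit S_LSHL_B64 and the
; S_ADD_U32/S_ADDC_U32 pair must survive; the truncating i32 store is then
; expected to take only the low register of the result.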
; SI-LABEL: {{^}}trunc_shl_i64:
; SI: S_LOAD_DWORDX2 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI: S_LSHL_B64 s{{\[}}[[LO_SHL:[0-9]+]]:{{[0-9]+\]}}, s{{\[}}[[LO_SREG]]:{{[0-9]+\]}}, 2
; SI: S_ADD_U32 s[[LO_SREG2:[0-9]+]], s[[LO_SHL]],
; SI: S_ADDC_U32
; SI: V_MOV_B32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG2]]
; SI: BUFFER_STORE_DWORD v[[LO_VREG]],
define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
  %aa = add i64 %a, 234 ; Prevent shrinking store.
  %b = shl i64 %aa, 2
  %result = trunc i64 %b to i32
  store i32 %result, i32 addrspace(1)* %out, align 4
  store i64 %b, i64 addrspace(1)* %out2, align 8 ; Prevent reducing ops to 32-bits
  ret void
}

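; The checks expect i1 truncation of a VGPR value to be done by masking off
; everything but the low bit with V_AND_B32_e32 against 1, followed by a
; V_CMP_EQ_I32 feeding the select.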
; SI-LABEL: {{^}}trunc_i32_to_i1:
; SI: V_AND_B32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI: V_CMP_EQ_I32
define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
  %a = load i32 addrspace(1)* %ptr, align 4
  %trunc = trunc i32 %a to i1
  %result = select i1 %trunc, i32 1, i32 0
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

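; Same pattern when the source is an SGPR argument: the low-bit mask is
; expected to use the VOP3 V_AND_B32_e64 form, since the VOP2 encoding only
; allows a VGPR in the second source operand, again followed by V_CMP_EQ_I32.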
; SI-LABEL: {{^}}sgpr_trunc_i32_to_i1:
; SI: V_AND_B32_e64 v{{[0-9]+}}, 1, s{{[0-9]+}}
; SI: V_CMP_EQ_I32
define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
  %trunc = trunc i32 %a to i1
  %result = select i1 %trunc, i32 1, i32 0
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}