;RUN: llc -march=hexagon -filetype=obj < %s -o - | llvm-objdump -mv60 -mhvx -d - | FileCheck %s

; This should not crash, and vd0 should map to vxor.

target triple = "hexagon"

@g0 = common global <32 x i32> zeroinitializer, align 128

; Function Attrs: nounwind
define i32 @f0() #0 {
b0:
  %v0 = call <32 x i32> @llvm.hexagon.V6.vd0.128B()
  store <32 x i32> %v0, <32 x i32>* @g0, align 128
  ret i32 0
}
; CHECK: { v{{[0-9]}} = vxor(v{{[0-9]}},v{{[0-9]}})

; Function Attrs: nounwind
define i32 @f1(i32 %a0) #0 {
b0:
  %v0 = alloca i8, align 1
  %v1 = alloca i8, align 1
  %v2 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 5, i32 0)
  %v3 = trunc i64 %v2 to i8
  store volatile i8 %v3, i8* %v0, align 1
  %v4 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 4, i32 4)
  %v5 = trunc i64 %v4 to i8
  store volatile i8 %v5, i8* %v1, align 1
  %v6 = load volatile i8, i8* %v0, align 1
  %v7 = zext i8 %v6 to i32
  %v8 = load volatile i8, i8* %v1, align 1
  %v9 = zext i8 %v8 to i32
  %v10 = add nuw nsw i32 %v9, %v7
  ret i32 %v10
}
; CHECK: combine(#0,#4)
; CHECK: r{{[0-9]}}:{{[0-9]}} = asr(r{{[0-9]}}:{{[0-9]}},#3):rnd

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64, i32) #1

; Function Attrs: nounwind
define i32 @f2(i32 %a0) #0 {
b0:
  %v0 = alloca i8, align 1
  %v1 = alloca i8, align 1
  %v2 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 6, i32 0)
  %v3 = trunc i64 %v2 to i8
  store volatile i8 %v3, i8* %v0, align 1
  %v4 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 4, i32 4)
  %v5 = trunc i64 %v4 to i8
  store volatile i8 %v5, i8* %v1, align 1
  %v6 = load volatile i8, i8* %v0, align 1
  %v7 = zext i8 %v6 to i32
  %v8 = load volatile i8, i8* %v1, align 1
  %v9 = zext i8 %v8 to i32
  %v10 = add nuw nsw i32 %v9, %v7
  ret i32 %v10
}
; CHECK: combine(#0,#4)
; CHECK: r{{[0-9]}}:{{[0-9]}} = vasrh(r{{[0-9]}}:{{[0-9]}},#3):raw

; Function Attrs: nounwind readnone
declare i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64, i32) #1

; Function Attrs: nounwind
define i32 @f3(i32 %a0) #0 {
b0:
  %v0 = alloca i8, align 1
  %v1 = alloca i8, align 1
  %v2 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 0, i32 0)
  %v3 = trunc i32 %v2 to i8
  store volatile i8 %v3, i8* %v0, align 1
  %v4 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 4, i32 4)
  %v5 = trunc i32 %v4 to i8
  store volatile i8 %v5, i8* %v1, align 1
  %v6 = load volatile i8, i8* %v0, align 1
  %v7 = zext i8 %v6 to i32
  %v8 = load volatile i8, i8* %v1, align 1
  %v9 = zext i8 %v8 to i32
  %v10 = add nuw nsw i32 %v9, %v7
  ret i32 %v10
}
; CHECK: r{{[0-9]}} = vasrhub(r{{[0-9]}}:{{[0-9]}},#3):raw
; CHECK: r{{[0-9]}} = vsathub(r{{[0-9]}}:{{[0-9]}})

; Function Attrs: nounwind readnone
declare i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64, i32) #1

; Function Attrs: nounwind readnone
declare <32 x i32> @llvm.hexagon.V6.vd0.128B() #1

attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
attributes #1 = { nounwind readnone }