;; Note: This test case disables VSX until LE support is enabled, as
;; otherwise we fail trying to deal with the @llvm.ppc.vsx.* builtins
;; for loads and stores.
; RUN: llc -mcpu=pwr8 -O2 -mtriple=powerpc64-unknown-linux-gnu < %s
;; FIXME: Delete this line and the lines above when VSX LE support is enabled.

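;; The RUN lines below are deliberately broken as "R;UN" so lit skips them;
;; once VSX LE support lands they should verify the lxvd2x/stxvd2x/xxpermdi
;; instruction counts for the powerpc64le triple.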
; R;UN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s > %t
; R;UN: grep lxvd2x < %t | count 18
; R;UN: grep stxvd2x < %t | count 18
; R;UN: grep xxpermdi < %t | count 36

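; Input vectors and zero-initialized result slots used by @test1 below.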
@vf = global <4 x float> <float -1.500000e+00, float 2.500000e+00, float -3.500000e+00, float 4.500000e+00>, align 16
@vd = global <2 x double> <double 3.500000e+00, double -7.500000e+00>, align 16
@vsi = global <4 x i32> <i32 -1, i32 2, i32 -3, i32 4>, align 16
@vui = global <4 x i32> <i32 0, i32 1, i32 2, i32 3>, align 16
@vsll = global <2 x i64> <i64 255, i64 -937>, align 16
@vull = global <2 x i64> <i64 1447, i64 2894>, align 16
@res_vsi = common global <4 x i32> zeroinitializer, align 16
@res_vui = common global <4 x i32> zeroinitializer, align 16
@res_vf = common global <4 x float> zeroinitializer, align 16
@res_vsll = common global <2 x i64> zeroinitializer, align 16
@res_vull = common global <2 x i64> zeroinitializer, align 16
@res_vd = common global <2 x double> zeroinitializer, align 16

define void @test1() {
entry:
; CHECK-LABEL: test1
  %__a.addr.i31 = alloca i32, align 4
  %__b.addr.i32 = alloca <4 x i32>*, align 8
  %__a.addr.i29 = alloca i32, align 4
  %__b.addr.i30 = alloca <4 x float>*, align 8
  %__a.addr.i27 = alloca i32, align 4
  %__b.addr.i28 = alloca <2 x i64>*, align 8
  %__a.addr.i25 = alloca i32, align 4
  %__b.addr.i26 = alloca <2 x i64>*, align 8
  %__a.addr.i23 = alloca i32, align 4
  %__b.addr.i24 = alloca <2 x double>*, align 8
  %__a.addr.i20 = alloca <4 x i32>, align 16
  %__b.addr.i21 = alloca i32, align 4
  %__c.addr.i22 = alloca <4 x i32>*, align 8
  %__a.addr.i17 = alloca <4 x i32>, align 16
  %__b.addr.i18 = alloca i32, align 4
  %__c.addr.i19 = alloca <4 x i32>*, align 8
  %__a.addr.i14 = alloca <4 x float>, align 16
  %__b.addr.i15 = alloca i32, align 4
  %__c.addr.i16 = alloca <4 x float>*, align 8
  %__a.addr.i11 = alloca <2 x i64>, align 16
  %__b.addr.i12 = alloca i32, align 4
  %__c.addr.i13 = alloca <2 x i64>*, align 8
  %__a.addr.i8 = alloca <2 x i64>, align 16
  %__b.addr.i9 = alloca i32, align 4
  %__c.addr.i10 = alloca <2 x i64>*, align 8
  %__a.addr.i6 = alloca <2 x double>, align 16
  %__b.addr.i7 = alloca i32, align 4
  %__c.addr.i = alloca <2 x double>*, align 8
  %__a.addr.i = alloca i32, align 4
  %__b.addr.i = alloca <4 x i32>*, align 8
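  ; Load @vsi through @llvm.ppc.vsx.lxvw4x and store the result to @res_vsi.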
  store i32 0, i32* %__a.addr.i, align 4
  store <4 x i32>* @vsi, <4 x i32>** %__b.addr.i, align 8
  %0 = load i32* %__a.addr.i, align 4
  %1 = load <4 x i32>** %__b.addr.i, align 8
  %2 = bitcast <4 x i32>* %1 to i8*
  %3 = getelementptr i8* %2, i32 %0
  %4 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %3)
  store <4 x i32> %4, <4 x i32>* @res_vsi, align 16
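  ; Same lxvw4x load for the unsigned vector @vui.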
  store i32 0, i32* %__a.addr.i31, align 4
  store <4 x i32>* @vui, <4 x i32>** %__b.addr.i32, align 8
  %5 = load i32* %__a.addr.i31, align 4
  %6 = load <4 x i32>** %__b.addr.i32, align 8
  %7 = bitcast <4 x i32>* %6 to i8*
  %8 = getelementptr i8* %7, i32 %5
  %9 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %8)
  store <4 x i32> %9, <4 x i32>* @res_vui, align 16
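  ; lxvw4x load of @vf; the <4 x i32> result is bitcast back to <4 x float>.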
  store i32 0, i32* %__a.addr.i29, align 4
  store <4 x float>* @vf, <4 x float>** %__b.addr.i30, align 8
  %10 = load i32* %__a.addr.i29, align 4
  %11 = load <4 x float>** %__b.addr.i30, align 8
  %12 = bitcast <4 x float>* %11 to i8*
  %13 = getelementptr i8* %12, i32 %10
  %14 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %13)
  %15 = bitcast <4 x i32> %14 to <4 x float>
  store <4 x float> %15, <4 x float>* @res_vf, align 16
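  ; lxvd2x load of @vsll; the <2 x double> result is bitcast back to <2 x i64>.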
  store i32 0, i32* %__a.addr.i27, align 4
  store <2 x i64>* @vsll, <2 x i64>** %__b.addr.i28, align 8
  %16 = load i32* %__a.addr.i27, align 4
  %17 = load <2 x i64>** %__b.addr.i28, align 8
  %18 = bitcast <2 x i64>* %17 to i8*
  %19 = getelementptr i8* %18, i32 %16
  %20 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %19)
  %21 = bitcast <2 x double> %20 to <2 x i64>
  store <2 x i64> %21, <2 x i64>* @res_vsll, align 16
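  ; Same lxvd2x load for the unsigned vector @vull.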
  store i32 0, i32* %__a.addr.i25, align 4
  store <2 x i64>* @vull, <2 x i64>** %__b.addr.i26, align 8
  %22 = load i32* %__a.addr.i25, align 4
  %23 = load <2 x i64>** %__b.addr.i26, align 8
  %24 = bitcast <2 x i64>* %23 to i8*
  %25 = getelementptr i8* %24, i32 %22
  %26 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %25)
  %27 = bitcast <2 x double> %26 to <2 x i64>
  store <2 x i64> %27, <2 x i64>* @res_vull, align 16
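  ; lxvd2x load of the double vector @vd; no bitcast is needed.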
  store i32 0, i32* %__a.addr.i23, align 4
  store <2 x double>* @vd, <2 x double>** %__b.addr.i24, align 8
  %28 = load i32* %__a.addr.i23, align 4
  %29 = load <2 x double>** %__b.addr.i24, align 8
  %30 = bitcast <2 x double>* %29 to i8*
  %31 = getelementptr i8* %30, i32 %28
  %32 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %31)
  store <2 x double> %32, <2 x double>* @res_vd, align 16
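  ; Store @vsi into @res_vsi through @llvm.ppc.vsx.stxvw4x.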
  %33 = load <4 x i32>* @vsi, align 16
  store <4 x i32> %33, <4 x i32>* %__a.addr.i20, align 16
  store i32 0, i32* %__b.addr.i21, align 4
  store <4 x i32>* @res_vsi, <4 x i32>** %__c.addr.i22, align 8
  %34 = load <4 x i32>* %__a.addr.i20, align 16
  %35 = load i32* %__b.addr.i21, align 4
  %36 = load <4 x i32>** %__c.addr.i22, align 8
  %37 = bitcast <4 x i32>* %36 to i8*
  %38 = getelementptr i8* %37, i32 %35
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %34, i8* %38)
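  ; Same stxvw4x store for the unsigned vector @vui.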
  %39 = load <4 x i32>* @vui, align 16
  store <4 x i32> %39, <4 x i32>* %__a.addr.i17, align 16
  store i32 0, i32* %__b.addr.i18, align 4
  store <4 x i32>* @res_vui, <4 x i32>** %__c.addr.i19, align 8
  %40 = load <4 x i32>* %__a.addr.i17, align 16
  %41 = load i32* %__b.addr.i18, align 4
  %42 = load <4 x i32>** %__c.addr.i19, align 8
  %43 = bitcast <4 x i32>* %42 to i8*
  %44 = getelementptr i8* %43, i32 %41
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %40, i8* %44)
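  ; stxvw4x store of @vf, bitcast to <4 x i32> first.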
  %45 = load <4 x float>* @vf, align 16
  store <4 x float> %45, <4 x float>* %__a.addr.i14, align 16
  store i32 0, i32* %__b.addr.i15, align 4
  store <4 x float>* @res_vf, <4 x float>** %__c.addr.i16, align 8
  %46 = load <4 x float>* %__a.addr.i14, align 16
  %47 = bitcast <4 x float> %46 to <4 x i32>
  %48 = load i32* %__b.addr.i15, align 4
  %49 = load <4 x float>** %__c.addr.i16, align 8
  %50 = bitcast <4 x float>* %49 to i8*
  %51 = getelementptr i8* %50, i32 %48
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %47, i8* %51)
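  ; stxvd2x store of @vsll, bitcast to <2 x double> first.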
  %52 = load <2 x i64>* @vsll, align 16
  store <2 x i64> %52, <2 x i64>* %__a.addr.i11, align 16
  store i32 0, i32* %__b.addr.i12, align 4
  store <2 x i64>* @res_vsll, <2 x i64>** %__c.addr.i13, align 8
  %53 = load <2 x i64>* %__a.addr.i11, align 16
  %54 = bitcast <2 x i64> %53 to <2 x double>
  %55 = load i32* %__b.addr.i12, align 4
  %56 = load <2 x i64>** %__c.addr.i13, align 8
  %57 = bitcast <2 x i64>* %56 to i8*
  %58 = getelementptr i8* %57, i32 %55
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %54, i8* %58)
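  ; Same stxvd2x store for the unsigned vector @vull.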
  %59 = load <2 x i64>* @vull, align 16
  store <2 x i64> %59, <2 x i64>* %__a.addr.i8, align 16
  store i32 0, i32* %__b.addr.i9, align 4
  store <2 x i64>* @res_vull, <2 x i64>** %__c.addr.i10, align 8
  %60 = load <2 x i64>* %__a.addr.i8, align 16
  %61 = bitcast <2 x i64> %60 to <2 x double>
  %62 = load i32* %__b.addr.i9, align 4
  %63 = load <2 x i64>** %__c.addr.i10, align 8
  %64 = bitcast <2 x i64>* %63 to i8*
  %65 = getelementptr i8* %64, i32 %62
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %61, i8* %65)
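  ; stxvd2x store of @vd into @res_vd; no bitcast is needed.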
  %66 = load <2 x double>* @vd, align 16
  store <2 x double> %66, <2 x double>* %__a.addr.i6, align 16
  store i32 0, i32* %__b.addr.i7, align 4
  store <2 x double>* @res_vd, <2 x double>** %__c.addr.i, align 8
  %67 = load <2 x double>* %__a.addr.i6, align 16
  %68 = load i32* %__b.addr.i7, align 4
  %69 = load <2 x double>** %__c.addr.i, align 8
  %70 = bitcast <2 x double>* %69 to i8*
  %71 = getelementptr i8* %70, i32 %68
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %67, i8* %71)
  ret void
}

declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)