; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s --check-prefix=ELF64

; This test verifies that load/store instructions are properly generated,
; and that they pass MI verification.
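; (-fast-isel-abort makes llc fail outright if fast-isel cannot select an
; instruction, and -verify-machineinstrs runs the machine verifier over the
; generated code.)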

@a = global i8 1, align 1
@b = global i16 2, align 2
@c = global i32 4, align 4
@d = global i64 8, align 8
@e = global float 1.25, align 4
@f = global double 3.5, align 8

%struct.s = type <{ i8, i32 }>
%struct.t = type <{ i8, i64 }>

@g = global %struct.s <{ i8 1, i32 2 }>, align 1
@h = global %struct.t <{ i8 1, i64 2 }>, align 1
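; (The packed layouts <{ ... }> place the i32 and i64 fields at byte offset 1,
; which the misaligned-offset tests t13-t15 below rely on.)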

@i = common global [8192 x i64] zeroinitializer, align 8

; load

define i8 @t1() nounwind uwtable ssp {
; ELF64: t1
  %1 = load i8* @a, align 1
; ELF64: lbz
  %2 = add nsw i8 %1, 1
; ELF64: addi
  ret i8 %2
}

define i16 @t2() nounwind uwtable ssp {
; ELF64: t2
  %1 = load i16* @b, align 2
; ELF64: lhz
  %2 = add nsw i16 %1, 1
; ELF64: addi
  ret i16 %2
}

define i32 @t3() nounwind uwtable ssp {
; ELF64: t3
  %1 = load i32* @c, align 4
; ELF64: lwz
  %2 = add nsw i32 %1, 1
; ELF64: addi
  ret i32 %2
}

define i64 @t4() nounwind uwtable ssp {
; ELF64: t4
  %1 = load i64* @d, align 4
; ELF64: ld
  %2 = add nsw i64 %1, 1
; ELF64: addi
  ret i64 %2
}

define float @t5() nounwind uwtable ssp {
; ELF64: t5
  %1 = load float* @e, align 4
; ELF64: lfs
  %2 = fadd float %1, 1.0
; ELF64: fadds
  ret float %2
}

define double @t6() nounwind uwtable ssp {
; ELF64: t6
  %1 = load double* @f, align 8
; ELF64: lfd
  %2 = fadd double %1, 1.0
; ELF64: fadd
  ret double %2
}

; store

define void @t7(i8 %v) nounwind uwtable ssp {
; ELF64: t7
  %1 = add nsw i8 %v, 1
  store i8 %1, i8* @a, align 1
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: stb
  ret void
}

define void @t8(i16 %v) nounwind uwtable ssp {
; ELF64: t8
  %1 = add nsw i16 %v, 1
  store i16 %1, i16* @b, align 2
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: sth
  ret void
}

define void @t9(i32 %v) nounwind uwtable ssp {
; ELF64: t9
  %1 = add nsw i32 %v, 1
  store i32 %1, i32* @c, align 4
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: stw
  ret void
}

define void @t10(i64 %v) nounwind uwtable ssp {
; ELF64: t10
  %1 = add nsw i64 %v, 1
  store i64 %1, i64* @d, align 4
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: std
  ret void
}

define void @t11(float %v) nounwind uwtable ssp {
; ELF64: t11
  %1 = fadd float %v, 1.0
  store float %1, float* @e, align 4
; ELF64: fadds
; ELF64: stfs
  ret void
}

define void @t12(double %v) nounwind uwtable ssp {
; ELF64: t12
  %1 = fadd double %v, 1.0
  store double %1, double* @f, align 8
; ELF64: fadd
; ELF64: stfd
  ret void
}

;; lwa requires an offset divisible by 4, so we need lwax here.
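;; (lwa is a DS-form instruction: its 16-bit displacement has the low two
;; bits implied zero. The i32 field of the packed %struct.s sits at byte
;; offset 1, which forces the indexed X-form.)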
define i64 @t13() nounwind uwtable ssp {
; ELF64: t13
  %1 = load i32* getelementptr inbounds (%struct.s* @g, i32 0, i32 1), align 1
  %2 = sext i32 %1 to i64
; ELF64: li
; ELF64: lwax
  %3 = add nsw i64 %2, 1
; ELF64: addi
  ret i64 %3
}

;; ld requires an offset divisible by 4, so we need ldx here.
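;; (ld is DS-form as well; the i64 field of the packed %struct.t is at byte
;; offset 1.)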
define i64 @t14() nounwind uwtable ssp {
; ELF64: t14
  %1 = load i64* getelementptr inbounds (%struct.t* @h, i32 0, i32 1), align 1
; ELF64: li
; ELF64: ldx
  %2 = add nsw i64 %1, 1
; ELF64: addi
  ret i64 %2
}

;; std requires an offset divisible by 4, so we need stdx here.
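;; (std shares the same DS-form displacement restriction as ld.)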
define void @t15(i64 %v) nounwind uwtable ssp {
; ELF64: t15
  %1 = add nsw i64 %v, 1
  store i64 %1, i64* getelementptr inbounds (%struct.t* @h, i32 0, i32 1), align 1
; ELF64: addis
; ELF64: addi
; ELF64: addi
; ELF64: li
; ELF64: stdx
  ret void
}

;; ld requires an offset that fits in 16 bits, so we need ldx here.
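;; (The displacement is a signed 16-bit field, so the largest reachable
;; offset is 32764; element 5000 is at byte offset 40000, so the offset is
;; materialized into a register with lis/ori instead.)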
define i64 @t16() nounwind uwtable ssp {
; ELF64: t16
  %1 = load i64* getelementptr inbounds ([8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: lis
; ELF64: ori
; ELF64: ldx
  %2 = add nsw i64 %1, 1
; ELF64: addi
  ret i64 %2
}

;; std requires an offset that fits in 16 bits, so we need stdx here.
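;; (Byte offset 40000 is likewise out of range for std, so lis/ori
;; materialize it and stdx is used.)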
define void @t17(i64 %v) nounwind uwtable ssp {
; ELF64: t17
  %1 = add nsw i64 %v, 1
  store i64 %1, i64* getelementptr inbounds ([8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: addis
; ELF64: ld
; ELF64: addi
; ELF64: lis
; ELF64: ori
; ELF64: stdx
  ret void
}