Reid Spencer | d0e30dc | 2006-12-02 04:23:10 +0000 | [diff] [blame] | 1 | ; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | grep ST | not grep 'fadd\|fsub\|fdiv\|fmul' |
Chris Lattner | fe94f0b | 2004-04-11 22:05:16 +0000 | [diff] [blame] | 2 | |
| 3 | ; Test that the load of the memory location is folded into the operation. |
| 4 | |
| 5 | |
; The add of a value loaded from memory should codegen as a single
; fADD-from-memory instruction (no standalone load, per the RUN-line grep).
double %test_add(double %X, double *%P) {
        %mem = load double* %P          ; fetch the second operand from memory
        %sum = add double %X, %mem      ; expect the load folded into the FP add
        ret double %sum
}
| 11 | |
; Multiply by a value loaded from memory: the load should be folded
; into the FP multiply rather than issued separately.
double %test_mul(double %X, double *%P) {
        %mem = load double* %P          ; memory operand
        %prod = mul double %X, %mem     ; load folds into the FP multiply
        ret double %prod
}
| 17 | |
; Subtract a memory operand from a register operand: the load should
; be folded into the FP subtract.
double %test_sub(double %X, double *%P) {
        %mem = load double* %P          ; right-hand operand from memory
        %diff = sub double %X, %mem     ; X - mem; load folds into the subtract
        ret double %diff
}
| 23 | |
; Reversed subtract: the memory value is the left-hand operand.
; Exercises the reverse-subtract form with a folded memory operand.
double %test_subr(double %X, double *%P) {
        %mem = load double* %P          ; left-hand operand from memory
        %diff = sub double %mem, %X     ; mem - X (operand order is the point of this test)
        ret double %diff
}
| 29 | |
; Divide by a memory operand: the load should be folded into the FP divide.
double %test_div(double %X, double *%P) {
        %mem = load double* %P          ; divisor fetched from memory
        %quot = div double %X, %mem     ; X / mem; load folds into the divide
        ret double %quot
}
| 35 | |
; Reversed divide: the memory value is the dividend.
; Exercises the reverse-divide form with a folded memory operand.
double %test_divr(double %X, double *%P) {
        %mem = load double* %P          ; dividend fetched from memory
        %quot = div double %mem, %X     ; mem / X (operand order is the point of this test)
        ret double %quot
}