; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
; RUN: grep -i ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}

; Test that the load from the memory location is folded into the FP operation.

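; The grep pipeline above keeps every assembly line mentioning "st" (notably
; the x87 ST(i) stack registers) and requires that none of those lines contain
; an FP arithmetic opcode, i.e. each fadd/fsub/fmul/fdiv should take its
; second operand straight from memory rather than from ST(i).
;
; A rough sketch of the distinction for test_add (register choices below are
; assumptions for illustration, not exact llc output):
;
;   ; unfolded: the load is materialized onto the FP stack first
;   fld   QWORD PTR [esp + 4]
;   fld   QWORD PTR [eax]
;   faddp ST(1), ST(0)
;
;   ; folded: the add reads its second operand directly from memory
;   fld   QWORD PTR [esp + 4]
;   fadd  QWORD PTR [eax]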
double %test_add(double %X, double *%P) {
        %Y = load double* %P
        %R = add double %X, %Y
        ret double %R
}

double %test_mul(double %X, double *%P) {
        %Y = load double* %P
        %R = mul double %X, %Y
        ret double %R
}

double %test_sub(double %X, double *%P) {
        %Y = load double* %P
        %R = sub double %X, %Y
        ret double %R
}

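; Reversed operand order: the memory operand is the minuend here, so folding
; the load would use the reverse-subtract form (e.g. fsubr with a memory
; operand) rather than a register-register fsub.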
double %test_subr(double %X, double *%P) {
        %Y = load double* %P
        %R = sub double %Y, %X
        ret double %R
}

double %test_div(double %X, double *%P) {
        %Y = load double* %P
        %R = div double %X, %Y
        ret double %R
}

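; As with test_subr, the reversed operand order should fold to the
; reverse-divide form (e.g. fdivr with a memory operand).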
double %test_divr(double %X, double *%P) {
        %Y = load double* %P
        %R = div double %Y, %X
        ret double %R
}