; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | grep ST | not grep 'fadd\|fsub\|fdiv\|fmul'
; Test that the load of the memory location is folded into the operation.
; Load %P and add: the load should fold into the FP add (fadd ST, [mem]),
; so no separate register-register fadd appears in the output.
double %test_add(double %X, double *%P) {
        %Y = load double* %P
        %R = add double %X, %Y
        ret double %R
}
; Load %P and multiply: the load should fold into the FP multiply.
double %test_mul(double %X, double *%P) {
        %Y = load double* %P
        %R = mul double %X, %Y
        ret double %R
}
; Load %P and subtract (X - mem): the load should fold into the FP subtract.
double %test_sub(double %X, double *%P) {
        %Y = load double* %P
        %R = sub double %X, %Y
        ret double %R
}
; Reversed subtract (mem - X): exercises the fsubr form; the load must still fold.
; Operand order is the point of this test — do not normalize it.
double %test_subr(double %X, double *%P) {
        %Y = load double* %P
        %R = sub double %Y, %X
        ret double %R
}
; Load %P and divide (X / mem): the load should fold into the FP divide.
double %test_div(double %X, double *%P) {
        %Y = load double* %P
        %R = div double %X, %Y
        ret double %R
}
; Reversed divide (mem / X): exercises the fdivr form; the load must still fold.
; Operand order is the point of this test — do not normalize it.
double %test_divr(double %X, double *%P) {
        %Y = load double* %P
        %R = div double %Y, %X
        ret double %R
}