; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
; RUN:   grep -i ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}

; Test that the load of the memory location is folded into the operation.
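; A minimal sketch of the intent (the register choices below are hypothetical):
; with the load folded, %test_add should emit a memory-operand form such as
;   fadd QWORD PTR [eax]
; rather than a separate load followed by a stack-register form such as
;   fadd ST(0), ST(1)
; which is why lines mentioning ST must not contain the arithmetic mnemonics.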

double %test_add(double %X, double *%P) {
  %Y = load double* %P
  %R = add double %X, %Y
  ret double %R
}

double %test_mul(double %X, double *%P) {
  %Y = load double* %P
  %R = mul double %X, %Y
  ret double %R
}

double %test_sub(double %X, double *%P) {
  %Y = load double* %P
  %R = sub double %X, %Y
  ret double %R
}

double %test_subr(double %X, double *%P) {
  %Y = load double* %P
  %R = sub double %Y, %X
  ret double %R
}

double %test_div(double %X, double *%P) {
  %Y = load double* %P
  %R = div double %X, %Y
  ret double %R
}

double %test_divr(double %X, double *%P) {
  %Y = load double* %P
  %R = div double %Y, %X
  ret double %R
}