[mips] Enable IAS by default for 32-bit MIPS targets (O32).
Summary:
The MIPS IAS can now pass 'ninja check-all', recurse (i.e. build itself), build a
bootable Linux kernel, and pass a variety of LNT tests.
Unfortunately, we can't enable it by default for 64-bit targets yet: the N32 ABI
is still very buggy, and since we can't distinguish between N32 and N64 in the
relevant code, we can't enable it for N64 either.
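For illustration only (this is a sketch, not the change made by this patch, and
shouldDefaultToIAS is a made-up name): the decision has to be made per triple,
and while O32 comes in through the 32-bit triples, N32 and N64 both arrive as
mips64/mips64el, so the two ABIs can't be told apart at this level.

  #include "llvm/ADT/Triple.h"

  // Sketch of the kind of per-triple check involved. O32 is reachable only
  // through the 32-bit triples, so it can be enabled on its own; N32 and N64
  // share the 64-bit triples and cannot be separated here.
  bool shouldDefaultToIAS(const llvm::Triple &T) {
    switch (T.getArch()) {
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
      return true;   // O32: IAS becomes the default.
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      return false;  // Could be N32 or N64; N32 is still too buggy, keep GAS.
    default:
      return false;
    }
  }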
Reviewers: vkalintiris
Subscribers: cfe-commits
Differential Revision: http://reviews.llvm.org/D18759
Differential Revision: http://reviews.llvm.org/D18761
llvm-svn: 269560
diff --git a/llvm/test/CodeGen/Mips/hf16call32.ll b/llvm/test/CodeGen/Mips/hf16call32.ll
index 9fc94ca..5159477 100644
--- a/llvm/test/CodeGen/Mips/hf16call32.ll
+++ b/llvm/test/CodeGen/Mips/hf16call32.ll
@@ -820,7 +820,7 @@
declare float @sf_v() #1
; stel: .section .mips16.call.fp.sf_v,"ax",@progbits
; stel: .ent __call_stub_fp_sf_v
-; stel: move $18, $31
+; stel: move $18, $ra
; stel: jal sf_v
; stel: mfc1 $2, $f0
; stel: jr $18
@@ -898,7 +898,7 @@
declare double @df_v() #1
; stel: .section .mips16.call.fp.df_v,"ax",@progbits
; stel: .ent __call_stub_fp_df_v
-; stel: move $18, $31
+; stel: move $18, $ra
; stel: jal df_v
; stel: mfc1 $2, $f0
; stel: mfc1 $3, $f1
@@ -983,7 +983,7 @@
declare { float, float } @sc_v() #1
; stel: .section .mips16.call.fp.sc_v,"ax",@progbits
; stel: .ent __call_stub_fp_sc_v
-; stel: move $18, $31
+; stel: move $18, $ra
; stel: jal sc_v
; stel: mfc1 $2, $f0
; stel: mfc1 $3, $f2
@@ -1004,7 +1004,7 @@
declare { double, double } @dc_v() #1
; stel: .section .mips16.call.fp.dc_v,"ax",@progbits
; stel: .ent __call_stub_fp_dc_v
-; stel: move $18, $31
+; stel: move $18, $ra
; stel: jal dc_v
; stel: mfc1 $4, $f2
; stel: mfc1 $5, $f3
diff --git a/llvm/test/CodeGen/Mips/hf16call32_body.ll b/llvm/test/CodeGen/Mips/hf16call32_body.ll
index 1a04fd4..49ce181 100644
--- a/llvm/test/CodeGen/Mips/hf16call32_body.ll
+++ b/llvm/test/CodeGen/Mips/hf16call32_body.ll
@@ -20,7 +20,8 @@
}
; stel: .section .mips16.fn.v_sf,"ax",@progbits
; stel: .ent __fn_stub_v_sf
-; stel: la $25, v_sf
+; stel: lui $25, %hi(v_sf)
+; stel: addiu $25, $25, %lo(v_sf)
; stel: mfc1 $4, $f12
; stel: jr $25
; stel: __fn_local_v_sf = v_sf
@@ -40,7 +41,8 @@
; stel: .section .mips16.fn.v_df,"ax",@progbits
; stel: .ent __fn_stub_v_df
-; stel: la $25, v_df
+; stel: lui $25, %hi(v_df)
+; stel: addiu $25, $25, %lo(v_df)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f13
; stel: jr $25
@@ -63,7 +65,8 @@
; stel: .section .mips16.fn.v_sf_sf,"ax",@progbits
; stel: .ent __fn_stub_v_sf_sf
-; stel: la $25, v_sf_sf
+; stel: lui $25, %hi(v_sf_sf)
+; stel: addiu $25, $25, %lo(v_sf_sf)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f14
; stel: jr $25
@@ -86,7 +89,8 @@
; stel: .section .mips16.fn.v_sf_df,"ax",@progbits
; stel: .ent __fn_stub_v_sf_df
-; stel: la $25, v_sf_df
+; stel: lui $25, %hi(v_sf_df)
+; stel: addiu $25, $25, %lo(v_sf_df)
; stel: mfc1 $4, $f12
; stel: mfc1 $6, $f14
; stel: mfc1 $7, $f15
@@ -110,7 +114,8 @@
; stel: .section .mips16.fn.v_df_sf,"ax",@progbits
; stel: .ent __fn_stub_v_df_sf
-; stel: la $25, v_df_sf
+; stel: lui $25, %hi(v_df_sf)
+; stel: addiu $25, $25, %lo(v_df_sf)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f13
; stel: mfc1 $6, $f14
@@ -134,7 +139,8 @@
; stel: .section .mips16.fn.v_df_df,"ax",@progbits
; stel: .ent __fn_stub_v_df_df
-; stel: la $25, v_df_df
+; stel: lui $25, %hi(v_df_df)
+; stel: addiu $25, $25, %lo(v_df_df)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f13
; stel: mfc1 $6, $f14
@@ -164,7 +170,8 @@
; stel: .section .mips16.fn.sf_sf,"ax",@progbits
; stel: .ent __fn_stub_sf_sf
-; stel: la $25, sf_sf
+; stel: lui $25, %hi(sf_sf)
+; stel: addiu $25, $25, %lo(sf_sf)
; stel: mfc1 $4, $f12
; stel: jr $25
; stel: __fn_local_sf_sf = sf_sf
@@ -184,7 +191,8 @@
; stel: .section .mips16.fn.sf_df,"ax",@progbits
; stel: .ent __fn_stub_sf_df
-; stel: la $25, sf_df
+; stel: lui $25, %hi(sf_df)
+; stel: addiu $25, $25, %lo(sf_df)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f13
; stel: jr $25
@@ -208,7 +216,8 @@
; stel: .section .mips16.fn.sf_sf_sf,"ax",@progbits
; stel: .ent __fn_stub_sf_sf_sf
-; stel: la $25, sf_sf_sf
+; stel: lui $25, %hi(sf_sf_sf)
+; stel: addiu $25, $25, %lo(sf_sf_sf)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f14
; stel: jr $25
@@ -232,7 +241,8 @@
; stel: .section .mips16.fn.sf_sf_df,"ax",@progbits
; stel: .ent __fn_stub_sf_sf_df
-; stel: la $25, sf_sf_df
+; stel: lui $25, %hi(sf_sf_df)
+; stel: addiu $25, $25, %lo(sf_sf_df)
; stel: mfc1 $4, $f12
; stel: mfc1 $6, $f14
; stel: mfc1 $7, $f15
@@ -257,7 +267,8 @@
; stel: .section .mips16.fn.sf_df_sf,"ax",@progbits
; stel: .ent __fn_stub_sf_df_sf
-; stel: la $25, sf_df_sf
+; stel: lui $25, %hi(sf_df_sf)
+; stel: addiu $25, $25, %lo(sf_df_sf)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f13
; stel: mfc1 $6, $f14
@@ -282,7 +293,8 @@
; stel: .section .mips16.fn.sf_df_df,"ax",@progbits
; stel: .ent __fn_stub_sf_df_df
-; stel: la $25, sf_df_df
+; stel: lui $25, %hi(sf_df_df)
+; stel: addiu $25, $25, %lo(sf_df_df)
; stel: mfc1 $4, $f12
; stel: mfc1 $5, $f13
; stel: mfc1 $6, $f14
diff --git a/llvm/test/CodeGen/Mips/hf1_body.ll b/llvm/test/CodeGen/Mips/hf1_body.ll
index adf4510..260f6c3 100644
--- a/llvm/test/CodeGen/Mips/hf1_body.ll
+++ b/llvm/test/CodeGen/Mips/hf1_body.ll
@@ -1,4 +1,11 @@
-; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=picfp16
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 \
+; RUN: -relocation-model=pic -no-integrated-as < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GAS
+
+; The integrated assembler expands assembly macros before printing.
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 \
+; RUN: -relocation-model=pic < %s | \
+; RUN: FileCheck %s -check-prefix=ALL -check-prefix=IAS
@x = external global float
@@ -11,11 +18,13 @@
store float %0, float* @x, align 4
ret void
}
-; picfp16: .ent __fn_stub_v_sf
-; picfp16: .cpload $25
-; picfp16: .set reorder
-; picfp16: .reloc 0, R_MIPS_NONE, v_sf
-; picfp16: la $25, $__fn_local_v_sf
-; picfp16: mfc1 $4, $f12
-; picfp16: jr $25
-; picfp16: .end __fn_stub_v_sf
+; ALL-LABEL: .ent __fn_stub_v_sf
+; ALL: .cpload $25
+; ALL: .set reorder
+; ALL: .reloc 0, R_MIPS_NONE, v_sf
+; GAS: la $25, $__fn_local_v_sf
+; IAS: lui $25, %hi($$__fn_local_v_sf)
+; IAS: addiu $25, $25, %lo($$__fn_local_v_sf)
+; ALL: mfc1 $4, $f12
+; ALL: jr $25
+; ALL: .end __fn_stub_v_sf
diff --git a/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll b/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll
index a9cc98e..8089308 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll
@@ -5,6 +5,12 @@
; RUN: llc -no-integrated-as -march=mips -relocation-model=pic < %s | \
; RUN: FileCheck -check-prefix=ALL -check-prefix=BE32 -check-prefix=GAS %s
+; IAS might not print in the same way since it parses the assembly.
+; RUN: llc -march=mipsel -relocation-model=pic < %s | \
+; RUN: FileCheck -check-prefix=ALL -check-prefix=LE32 -check-prefix=IAS %s
+; RUN: llc -march=mips -relocation-model=pic < %s | \
+; RUN: FileCheck -check-prefix=ALL -check-prefix=BE32 -check-prefix=IAS %s
+
%union.u_tag = type { i64 }
%struct.anon = type { i32, i32 }
@uval = common global %union.u_tag zeroinitializer, align 8
@@ -15,6 +21,7 @@
; ALL-LABEL: constraint_X:
; ALL: #APP
; GAS: addiu ${{[0-9]+}}, ${{[0-9]+}}, 0xfffffffffffffffd
+; IAS: addiu ${{[0-9]+}}, ${{[0-9]+}}, -3
; ALL: #NO_APP
tail call i32 asm sideeffect "addiu $0, $1, ${2:X}", "=r,r,I"(i32 7, i32 -3) ;
ret i32 0
@@ -26,6 +33,9 @@
; ALL-LABEL: constraint_x:
; ALL: #APP
; GAS: addiu ${{[0-9]+}}, ${{[0-9]+}}, 0xfffd
+; This is _also_ -3 because uimm16 values are silently coerced to simm16 when
+; it would otherwise fail to match.
+; IAS: addiu ${{[0-9]+}}, ${{[0-9]+}}, -3
; ALL: #NO_APP
tail call i32 asm sideeffect "addiu $0, $1, ${2:x}", "=r,r,I"(i32 7, i32 -3) ;
ret i32 0
@@ -54,39 +64,66 @@
}
; z with -3
-define i32 @constraint_z() nounwind {
+define void @constraint_z_0() nounwind {
entry:
-; ALL-LABEL: constraint_z:
+; ALL-LABEL: constraint_z_0:
; ALL: #APP
; ALL: addiu ${{[0-9]+}}, ${{[0-9]+}}, -3
; ALL: #NO_APP
tail call i32 asm sideeffect "addiu $0, $1, ${2:z}", "=r,r,I"(i32 7, i32 -3) ;
+ ret void
+}
; z with 0
+define void @constraint_z_1() nounwind {
+entry:
+; ALL-LABEL: constraint_z_1:
; ALL: #APP
-; GAS: addiu ${{[0-9]+}}, ${{[0-9]+}}, $0
+; GAS: addu ${{[0-9]+}}, ${{[0-9]+}}, $0
+; IAS: move ${{[0-9]+}}, ${{[0-9]+}}
; ALL: #NO_APP
- tail call i32 asm sideeffect "addiu $0, $1, ${2:z}", "=r,r,I"(i32 7, i32 0) nounwind
+ tail call i32 asm sideeffect "addu $0, $1, ${2:z}", "=r,r,I"(i32 7, i32 0) nounwind
+ ret void
+}
; z with non-zero and the "r"(register) and "J"(integer zero) constraints
+define void @constraint_z_2() nounwind {
+entry:
+; ALL-LABEL: constraint_z_2:
; ALL: #APP
; ALL: mtc0 ${{[1-9][0-9]?}}, ${{[0-9]+}}
; ALL: #NO_APP
call void asm sideeffect "mtc0 ${0:z}, $$12", "Jr"(i32 7) nounwind
+ ret void
+}
; z with zero and the "r"(register) and "J"(integer zero) constraints
+define void @constraint_z_3() nounwind {
+entry:
+; ALL-LABEL: constraint_z_3:
; ALL: #APP
-; ALL: mtc0 $0, ${{[0-9]+}}
+; GAS: mtc0 $0, ${{[0-9]+}}
+; IAS: mtc0 $zero, ${{[0-9]+}}, 0
; ALL: #NO_APP
call void asm sideeffect "mtc0 ${0:z}, $$12", "Jr"(i32 0) nounwind
+ ret void
+}
; z with non-zero and just the "r"(register) constraint
+define void @constraint_z_4() nounwind {
+entry:
+; ALL-LABEL: constraint_z_4:
; ALL: #APP
; ALL: mtc0 ${{[1-9][0-9]?}}, ${{[0-9]+}}
; ALL: #NO_APP
call void asm sideeffect "mtc0 ${0:z}, $$12", "r"(i32 7) nounwind
+ ret void
+}
; z with zero and just the "r"(register) constraint
+define void @constraint_z_5() nounwind {
+entry:
+; ALL-LABEL: constraint_z_5:
; FIXME: Check for $0, instead of other registers.
; We should be using $0 directly in this case, not real registers.
; When the materialization of 0 gets fixed, this test will fail.
@@ -94,7 +131,7 @@
; ALL: mtc0 ${{[1-9][0-9]?}}, ${{[0-9]+}}
; ALL: #NO_APP
call void asm sideeffect "mtc0 ${0:z}, $$12", "r"(i32 0) nounwind
- ret i32 0
+ ret void
}
; A long long in 32 bit mode (use to assert)
diff --git a/llvm/test/CodeGen/Mips/inlineasm_constraint.ll b/llvm/test/CodeGen/Mips/inlineasm_constraint.ll
index a6ac071..e4c0023 100644
--- a/llvm/test/CodeGen/Mips/inlineasm_constraint.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm_constraint.ll
@@ -1,5 +1,6 @@
; RUN: llc -no-integrated-as -march=mipsel < %s | \
; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GAS
+; RUN: llc -march=mipsel < %s | FileCheck %s -check-prefix=ALL -check-prefix=IAS
define void @constraint_I() nounwind {
; First I with short
@@ -31,6 +32,7 @@
; Now K with 64
; ALL: #APP
; GAS: addu ${{[0-9]+}}, ${{[0-9]+}}, 64
+; IAS: addiu ${{[0-9]+}}, ${{[0-9]+}}, 64
; ALL: #NO_APP
tail call i16 asm sideeffect "addu $0, $1, $2\0A\09 ", "=r,r,K"(i16 7, i16 64) nounwind
ret void