[mips] Enable arithmetic and binary operations for the i128 data type.
Summary:
This patch adds support for operations that were missing for
128-bit integer types (add/sub/mul/sdiv/udiv, etc.). With these
changes we can support the __int128_t and __uint128_t data types
from C/C++.
Depends on D7125
Reviewers: dsanders
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D7143
llvm-svn: 227089
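
For reference, the C/C++ constructs this patch enables look like the
following minimal sketch (the function names are illustrative, not part of
the patch); each one exercises a lowering checked by the tests below:

    /* 128-bit arithmetic now supported by the MIPS backends. */
    __int128_t  add128(__int128_t a, __int128_t b)   { return a + b; }  /* add.ll  */
    __uint128_t mul128(__uint128_t a, __uint128_t b) { return a * b; }  /* mul.ll  */
    __int128_t  asr128(__int128_t a, unsigned n)     { return a >> n; } /* ashr.ll */
    __int128_t  div128(__int128_t a, __int128_t b)   { return a / b; }  /* sdiv.ll */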
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/add.ll b/llvm/test/CodeGen/Mips/llvm-ir/add.ll
index 8890992..83774ed 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/add.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/add.ll
@@ -83,3 +83,33 @@
%r = add i64 %a, %b
ret i64 %r
}
+
+define signext i128 @add_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: add_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 28($sp)
+ ; GP32: addu $[[T1:[0-9]+]], $7, $[[T0]]
+ ; GP32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 24($sp)
+ ; GP32: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP32: addu $[[T5:[0-9]+]], $6, $[[T4]]
+ ; GP32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; GP32: lw $[[T7:[0-9]+]], 20($sp)
+ ; GP32: addu $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+ ; GP32: lw $[[T9:[0-9]+]], 16($sp)
+ ; GP32: addu $3, $5, $[[T8]]
+ ; GP32: sltu $[[T10:[0-9]+]], $3, $[[T7]]
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T9]]
+ ; GP32: addu $2, $4, $[[T11]]
+ ; GP32: move $4, $[[T5]]
+ ; GP32: move $5, $[[T1]]
+
+ ; GP64: daddu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $3, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: daddu $2, $4, $[[T1]]
+
+ %r = add i128 %a, %b
+ ret i128 %r
+}
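
The GP32 block above checks a four-word ripple-carry expansion: %a arrives
in $4-$7, %b entirely on the stack (16($sp)..28($sp)), the result goes out
in $2-$5, and each addu is paired with an sltu that recovers the carry into
the next word. A C sketch of the same idea (illustrative; the backend emits
the four steps inline rather than as a loop):

    #include <stdint.h>

    typedef struct { uint32_t w[4]; } u128_words; /* w[0] most significant */

    static u128_words add_u128(u128_words a, u128_words b) {
        u128_words r;
        uint32_t carry = 0;
        for (int i = 3; i >= 0; i--) {       /* least significant word first */
            uint32_t s = a.w[i] + b.w[i];
            uint32_t c = s < b.w[i];         /* sltu: carry out of a + b */
            r.w[i] = s + carry;
            carry = c | (r.w[i] < s);        /* adding carry-in may also wrap */
        }
        return r;
    }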
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/and.ll b/llvm/test/CodeGen/Mips/llvm-ir/and.ll
index 3cb4957..09d0ef9 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/and.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/and.ll
@@ -72,3 +72,23 @@
%r = and i64 %a, %b
ret i64 %r
}
+
+define signext i128 @and_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: and_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: and $2, $4, $[[T2]]
+ ; GP32: and $3, $5, $[[T1]]
+ ; GP32: and $4, $6, $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
+ ; GP32: and $5, $7, $[[T3]]
+
+ ; GP64: and $2, $4, $6
+ ; GP64: and $3, $5, $7
+
+ %r = and i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll b/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
index b3f023a..8866b8b 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/ashr.ll
@@ -126,3 +126,63 @@
%r = ashr i64 %a, %b
ret i64 %r
}
+
+define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: ashr_i128:
+
+ ; GP32: lw $25, %call16(__ashrti3)($gp)
+
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
+ ; M3: dsrav $[[T1:[0-9]+]], $4, $[[T0]]
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 64
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
+ ; M3: move $3, $[[T1]]
+ ; M3: dsrlv $[[T4:[0-9]+]], $5, $[[T0]]
+ ; M3: dsll $[[T5:[0-9]+]], $4, 1
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
+ ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
+ ; M3: or $3, $[[T7]], $[[T4]]
+ ; M3: $[[BB0]]:
+ ; M3: beqz $[[T3]], $[[BB1:BB[0-9_]+]]
+ ; M3: nop
+ ; M3: dsra $2, $4, 63
+ ; M3: $[[BB1]]:
+ ; M3: jr $ra
+ ; M3: nop
+
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
+ ; GP64-NOT-R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP64-NOT-R6: dsll $[[T2:[0-9]+]], $4, 1
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP64-NOT-R6: or $3, $[[T4]], $[[T1]]
+ ; GP64-NOT-R6: dsrav $2, $4, $[[T0]]
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 64
+
+ ; GP64-NOT-R6: movn $3, $2, $[[T5]]
+ ; GP64-NOT-R6: dsra $[[T6:[0-9]+]], $4, 63
+ ; GP64-NOT-R6: jr $ra
+ ; GP64-NOT-R6: movn $2, $[[T6]], $[[T5]]
+
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
+ ; 64R6: dsrav $[[T1:[0-9]+]], $4, $[[T0]]
+ ; 64R6: andi $[[T2:[0-9]+]], $[[T0]], 64
+ ; 64R6: sll $[[T3:[0-9]+]], $[[T2]], 0
+ ; 64R6: seleqz $[[T4:[0-9]+]], $[[T1]], $[[T3]]
+ ; 64R6: dsra $[[T5:[0-9]+]], $4, 63
+ ; 64R6: selnez $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; 64R6: or $2, $[[T6]], $[[T4]]
+ ; 64R6: dsrlv $[[T7:[0-9]+]], $5, $[[T0]]
+ ; 64R6: dsll $[[T8:[0-9]+]], $4, 1
+ ; 64R6: not $[[T9:[0-9]+]], $[[T0]]
+ ; 64R6: dsllv $[[T10:[0-9]+]], $[[T8]], $[[T9]]
+ ; 64R6: or $[[T11:[0-9]+]], $[[T10]], $[[T7]]
+ ; 64R6: seleqz $[[T12:[0-9]+]], $[[T11]], $[[T3]]
+ ; 64R6: selnez $[[T13:[0-9]+]], $[[T1]], $[[T3]]
+ ; 64R6: jr $ra
+ ; 64R6: or $3, $[[T13]], $[[T12]]
+
+ %r = ashr i128 %a, %b
+ ret i128 %r
+}
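
On 64-bit subtargets the i128 value lives in two 64-bit registers, and the
M3/GP64/64R6 blocks above all check the standard two-word shift expansion:
compute the "shift amount < 64" result from both halves, compute the
"shift amount >= 64" result from the high half alone, and select between
them on bit 6 of the amount (the andi ..., 64); only the selection style
(branch, movn, or seleqz/selnez) differs between ISA revisions. A C sketch
of the arithmetic-shift case (illustrative; hi:lo is the 128-bit value and
n is assumed to be in [0, 127]):

    #include <stdint.h>

    static void ashr128(int64_t hi, uint64_t lo, unsigned n,
                        int64_t *rhi, uint64_t *rlo) {
        unsigned s = n & 63;
        if (n & 64) {                   /* andi ..., 64 */
            *rlo = (uint64_t)(hi >> s); /* dsrav on the high half */
            *rhi = hi >> 63;            /* dsra ..., 63: sign fill */
        } else {
            /* (hi << 1) << (63 - s) rather than hi << (64 - s), so the
               expansion stays well defined when s == 0 (the dsll/dsllv
               pair in the checks). */
            *rlo = (lo >> s) | (((uint64_t)hi << 1) << (63 - s));
            *rhi = hi >> s;             /* dsrav */
        }
    }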
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
index 8975c8c..1679678 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/lshr.ll
@@ -119,3 +119,58 @@
%r = lshr i64 %a, %b
ret i64 %r
}
+
+define signext i128 @lshr_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: lshr_i128:
+
+ ; GP32: lw $25, %call16(__lshrti3)($gp)
+
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
+ ; M3: dsrlv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 64
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
+ ; M3: move $3, $[[T1]]
+ ; M3: dsrlv $[[T4:[0-9]+]], $5, $[[T0]]
+ ; M3: dsll $[[T5:[0-9]+]], $4, 1
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
+ ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
+ ; M3: or $3, $[[T7]], $[[T4]]
+ ; M3: $[[BB0]]:
+ ; M3: bnez $[[T3]], $[[BB1:BB[0-9_]+]]
+ ; M3: daddiu $2, $zero, 0
+ ; M3: move $2, $[[T1]]
+ ; M3: $[[BB1]]:
+ ; M3: jr $ra
+ ; M3: nop
+
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
+ ; GP64-NOT-R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP64-NOT-R6: dsll $[[T2:[0-9]+]], $4, 1
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP64-NOT-R6: or $3, $[[T4]], $[[T1]]
+ ; GP64-NOT-R6: dsrlv $2, $4, $[[T0]]
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 64
+ ; GP64-NOT-R6: movn $3, $2, $[[T5]]
+ ; GP64-NOT-R6: jr $ra
+ ; GP64-NOT-R6: movn $2, $zero, $[[T5]]
+
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
+ ; 64R6: dsrlv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; 64R6: dsll $[[T2:[0-9]+]], $4, 1
+ ; 64R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; 64R6: dsllv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; 64R6: or $[[T5:[0-9]+]], $[[T4]], $[[T1]]
+ ; 64R6: andi $[[T6:[0-9]+]], $[[T0]], 64
+ ; 64R6: sll $[[T7:[0-9]+]], $[[T6]], 0
+ ; 64R6: seleqz $[[T8:[0-9]+]], $[[T5]], $[[T7]]
+ ; 64R6: dsrlv $[[T9:[0-9]+]], $4, $[[T0]]
+ ; 64R6: selnez $[[T10:[0-9]+]], $[[T9]], $[[T7]]
+ ; 64R6: or $3, $[[T10]], $[[T8]]
+ ; 64R6: jr $ra
+ ; 64R6: seleqz $2, $[[T9]], $[[T7]]
+
+ %r = lshr i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/mul.ll b/llvm/test/CodeGen/Mips/llvm-ir/mul.ll
index 1674124..5f7f338 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/mul.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/mul.ll
@@ -1,19 +1,19 @@
-; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=M2
-; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=32R1-R2 -check-prefix=32R1
-; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=32R1-R2 -check-prefix=32R2
-; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=32R6
-; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=M4
-; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=64R1-R2
-; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=64R1-R2
-; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefix=ALL -check-prefix=64R6
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=M2 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R1-R2 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R1-R2 -check-prefix=32R2 -check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=32R6 -check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=M4 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R1-R2 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R1-R2 -check-prefix=GP64-NOT-R6
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL \
+; RUN: -check-prefix=64R6
define signext i1 @mul_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -179,3 +179,30 @@
%r = mul i64 %a, %b
ret i64 %r
}
+
+define signext i128 @mul_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: mul_i128:
+
+ ; GP32: lw $25, %call16(__multi3)($gp)
+
+ ; GP64-NOT-R6: dmult $4, $7
+ ; GP64-NOT-R6: mflo $[[T0:[0-9]+]]
+ ; GP64-NOT-R6: dmult $5, $6
+ ; GP64-NOT-R6: mflo $[[T1:[0-9]+]]
+ ; GP64-NOT-R6: dmultu $5, $7
+ ; GP64-NOT-R6: mflo $3
+ ; GP64-NOT-R6: mfhi $[[T2:[0-9]+]]
+ ; GP64-NOT-R6: daddu $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; GP64-NOT-R6: daddu $2, $[[T3]], $[[T0]]
+
+ ; 64R6: dmul $[[T0:[0-9]+]], $5, $6
+ ; 64R6: dmuhu $[[T1:[0-9]+]], $5, $7
+ ; 64R6: daddu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; 64R6: dmul $[[T3:[0-9]+]], $4, $7
+ ; 64R6: daddu $2, $[[T2]], $[[T3]]
+ ; 64R6: dmul $3, $5, $7
+
+ %r = mul i128 %a, %b
+ ret i128 %r
+}
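
The GP64 blocks above check the usual decomposition of a 128-bit product
into 64-bit multiplies: one unsigned widening multiply of the low halves
supplies the low word and its carry (mflo/mfhi), and the two cross products
are added into the high word only (on R6 the hi/lo registers are gone, so
dmul/dmuhu replace dmult/dmultu + mflo/mfhi). A C sketch of the same
computation (illustrative):

    #include <stdint.h>

    /* hi:lo = (a_hi:a_lo) * (b_hi:b_lo), truncated to 128 bits. */
    static void mul128(uint64_t a_hi, uint64_t a_lo,
                       uint64_t b_hi, uint64_t b_lo,
                       uint64_t *hi, uint64_t *lo) {
        __uint128_t p = (__uint128_t)a_lo * b_lo; /* dmultu $5, $7 */
        *lo = (uint64_t)p;                        /* mflo -> $3 */
        *hi = (uint64_t)(p >> 64)                 /* mfhi */
            + a_lo * b_hi                         /* dmult $5, $6 */
            + a_hi * b_lo;                        /* dmult $4, $7 */
    }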
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/or.ll b/llvm/test/CodeGen/Mips/llvm-ir/or.ll
index d641b6e..21d1d4f 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/or.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/or.ll
@@ -73,3 +73,23 @@
%r = or i64 %a, %b
ret i64 %r
}
+
+define signext i128 @or_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: or_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: or $2, $4, $[[T2]]
+ ; GP32: or $3, $5, $[[T1]]
+ ; GP32: or $4, $6, $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
+ ; GP32: or $5, $7, $[[T3]]
+
+ ; GP64: or $2, $4, $6
+ ; GP64: or $3, $5, $7
+
+ %r = or i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
index 9b93fcf..54b7f70 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sdiv.ll
@@ -121,3 +121,16 @@
%r = sdiv i64 %a, %b
ret i64 %r
}
+
+define signext i128 @sdiv_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: sdiv_i128:
+
+ ; GP32: lw $25, %call16(__divti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__divti3)($gp)
+ ; 64R6: ld $25, %call16(__divti3)($gp)
+
+ %r = sdiv i128 %a, %b
+ ret i128 %r
+}
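
Division and remainder have no inline expansion; as the checks here and in
srem/udiv/urem show, they are lowered to runtime calls ("ti" in the names
stands for the 128-bit tetra-integer mode), just as the GP32 shifts call
__ashlti3/__lshrti3/__ashrti3. The compiler-rt/libgcc entry points have
these signatures (declarations for reference only):

    __int128_t  __divti3 (__int128_t  a, __int128_t  b);  /* sdiv */
    __uint128_t __udivti3(__uint128_t a, __uint128_t b);  /* udiv */
    __int128_t  __modti3 (__int128_t  a, __int128_t  b);  /* srem */
    __uint128_t __umodti3(__uint128_t a, __uint128_t b);  /* urem */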
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/shl.ll b/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
index a3948fe..072b44a 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/shl.ll
@@ -131,3 +131,58 @@
%r = shl i64 %a, %b
ret i64 %r
}
+
+define signext i128 @shl_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: shl_i128:
+
+ ; GP32: lw $25, %call16(__ashlti3)($gp)
+
+ ; M3: sll $[[T0:[0-9]+]], $7, 0
+ ; M3: dsllv $[[T1:[0-9]+]], $5, $[[T0]]
+ ; M3: andi $[[T2:[0-9]+]], $[[T0]], 64
+ ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
+ ; M3: move $2, $[[T1]]
+ ; M3: dsllv $[[T4:[0-9]+]], $4, $[[T0]]
+ ; M3: dsrl $[[T5:[0-9]+]], $5, 1
+ ; M3: not $[[T6:[0-9]+]], $[[T0]]
+ ; M3: dsrlv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
+ ; M3: or $2, $[[T4]], $[[T7]]
+ ; M3: $[[BB0]]:
+ ; M3: bnez $[[T3]], $[[BB1:BB[0-9_]+]]
+ ; M3: daddiu $3, $zero, 0
+ ; M3: move $3, $[[T1]]
+ ; M3: $[[BB1]]:
+ ; M3: jr $ra
+ ; M3: nop
+
+ ; GP64-NOT-R6: sll $[[T0:[0-9]+]], $7, 0
+ ; GP64-NOT-R6: dsllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; GP64-NOT-R6: dsrl $[[T2:[0-9]+]], $5, 1
+ ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; GP64-NOT-R6: dsrlv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP64-NOT-R6: or $2, $[[T1]], $[[T4]]
+ ; GP64-NOT-R6: dsllv $3, $5, $[[T0]]
+ ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T0]], 64
+ ; GP64-NOT-R6: movn $2, $3, $[[T5]]
+ ; GP64-NOT-R6: jr $ra
+ ; GP64-NOT-R6: movn $3, $zero, $[[T5]]
+
+ ; 64R6: sll $[[T0:[0-9]+]], $7, 0
+ ; 64R6: dsllv $[[T1:[0-9]+]], $4, $[[T0]]
+ ; 64R6: dsrl $[[T2:[0-9]+]], $5, 1
+ ; 64R6: not $[[T3:[0-9]+]], $[[T0]]
+ ; 64R6: dsrlv $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; 64R6: or $[[T5:[0-9]+]], $[[T1]], $[[T4]]
+ ; 64R6: andi $[[T6:[0-9]+]], $[[T0]], 64
+ ; 64R6: sll $[[T7:[0-9]+]], $[[T6]], 0
+ ; 64R6: seleqz $[[T8:[0-9]+]], $[[T5]], $[[T7]]
+ ; 64R6: dsllv $[[T9:[0-9]+]], $5, $[[T0]]
+ ; 64R6: selnez $[[T10:[0-9]+]], $[[T9]], $[[T7]]
+ ; 64R6: or $2, $[[T10]], $[[T8]]
+ ; 64R6: jr $ra
+ ; 64R6: seleqz $3, $[[T9]], $[[T7]]
+
+ %r = shl i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
index 8ae7b07..1e949d2 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/srem.ll
@@ -114,3 +114,16 @@
%r = srem i64 %a, %b
ret i64 %r
}
+
+define signext i128 @srem_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: srem_i128:
+
+ ; GP32: lw $25, %call16(__modti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__modti3)($gp)
+ ; 64R6: ld $25, %call16(__modti3)($gp)
+
+ %r = srem i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
index 7f2238d..6d592be 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -83,3 +83,32 @@
%r = sub i64 %a, %b
ret i64 %r
}
+
+define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: sub_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 20($sp)
+ ; GP32: sltu $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: addu $[[T3:[0-9]+]], $[[T1]], $[[T2]]
+ ; GP32: lw $[[T4:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T5:[0-9]+]], 28($sp)
+ ; GP32: subu $[[T6:[0-9]+]], $7, $[[T5]]
+ ; GP32: subu $2, $4, $[[T3]]
+ ; GP32: sltu $[[T8:[0-9]+]], $6, $[[T4]]
+ ; GP32: addu $[[T9:[0-9]+]], $[[T8]], $[[T0]]
+ ; GP32: subu $3, $5, $[[T9]]
+ ; GP32: sltu $[[T10:[0-9]+]], $7, $[[T5]]
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T4]]
+ ; GP32: subu $4, $6, $[[T11]]
+ ; GP32: move $5, $[[T6]]
+
+ ; GP64: dsubu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: dsubu $2, $4, $[[T1]]
+
+ %r = sub i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
index 1603424..1f7aa0d 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/udiv.ll
@@ -93,3 +93,16 @@
%r = udiv i64 %a, %b
ret i64 %r
}
+
+define signext i128 @udiv_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: udiv_i128:
+
+ ; GP32: lw $25, %call16(__udivti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__udivti3)($gp)
+ ; 64R6: ld $25, %call16(__udivti3)($gp)
+
+ %r = udiv i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
index 86c4605..7323534 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/urem.ll
@@ -130,3 +130,16 @@
%r = urem i64 %a, %b
ret i64 %r
}
+
+define signext i128 @urem_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: urem_i128:
+
+ ; GP32: lw $25, %call16(__umodti3)($gp)
+
+ ; GP64-NOT-R6: ld $25, %call16(__umodti3)($gp)
+ ; 64R6: ld $25, %call16(__umodti3)($gp)
+
+ %r = urem i128 %a, %b
+ ret i128 %r
+}
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/xor.ll b/llvm/test/CodeGen/Mips/llvm-ir/xor.ll
index 69d3614..94dead1 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/xor.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/xor.ll
@@ -72,3 +72,23 @@
%r = xor i64 %a, %b
ret i64 %r
}
+
+define signext i128 @xor_i128(i128 signext %a, i128 signext %b) {
+entry:
+; ALL-LABEL: xor_i128:
+
+ ; GP32: lw $[[T0:[0-9]+]], 24($sp)
+ ; GP32: lw $[[T1:[0-9]+]], 20($sp)
+ ; GP32: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32: xor $2, $4, $[[T2]]
+ ; GP32: xor $3, $5, $[[T1]]
+ ; GP32: xor $4, $6, $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 28($sp)
+ ; GP32: xor $5, $7, $[[T3]]
+
+ ; GP64: xor $2, $4, $6
+ ; GP64: xor $3, $5, $7
+
+ %r = xor i128 %a, %b
+ ret i128 %r
+}