Revert "[mips] Fix multiprecision arithmetic."

This reverts commit r305389. It broke Chromium builds, so I am reverting
while I investigate further.
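
For reference, the pattern under test is multiprecision (carry-propagating)
arithmetic: an integer type wider than a GPR is split into word-sized limbs,
the low limbs are combined first, and the carry is recovered with an unsigned
compare (sltu). A minimal sketch in LLVM IR, with the MIPS assembly the
restored checks expect quoted as comments (assumptions: mips32, O32 ABI, so
%a arrives in $4:$5 and %b in $6:$7 as hi:lo pairs; temporary names are
illustrative, not authoritative):

define i64 @add64(i64 %a, i64 %b) nounwind {
entry:
; Restored lowering per the GP32 checks below (a sketch):
;   addu $3, $5, $7             ; lo = a.lo + b.lo
;   sltu $[[T0]], $3, $7        ; carry = (lo < b.lo), i.e. unsigned overflow
;   addu $[[T1]], $[[T0]], $6   ; b.hi + carry
;   addu $2, $4, $[[T1]]        ; hi = a.hi + b.hi + carry
  %sum = add i64 %a, %b
  ret i64 %sum
}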

llvm-svn: 306741
diff --git a/llvm/test/CodeGen/Mips/2008-06-05-Carry.ll b/llvm/test/CodeGen/Mips/2008-06-05-Carry.ll
index 5e6092f..c61e1cd 100644
--- a/llvm/test/CodeGen/Mips/2008-06-05-Carry.ll
+++ b/llvm/test/CodeGen/Mips/2008-06-05-Carry.ll
@@ -2,21 +2,20 @@
 
 define i64 @add64(i64 %u, i64 %v) nounwind  {
 entry:
-; CHECK-LABEL: add64:
 ; CHECK: addu
-; CHECK-DAG: sltu
-; CHECK-DAG: addu
+; CHECK: sltu 
 ; CHECK: addu
-  %tmp2 = add i64 %u, %v
+; CHECK: addu
+  %tmp2 = add i64 %u, %v  
   ret i64 %tmp2
 }
 
 define i64 @sub64(i64 %u, i64 %v) nounwind  {
 entry:
-; CHECK-LABEL: sub64
-; CHECK-DAG: sltu
-; CHECK-DAG: subu
+; CHECK: sub64
 ; CHECK: subu
+; CHECK: sltu 
+; CHECK: addu
 ; CHECK: subu
   %tmp2 = sub i64 %u, %v
   ret i64 %tmp2
diff --git a/llvm/test/CodeGen/Mips/dsp-patterns.ll b/llvm/test/CodeGen/Mips/dsp-patterns.ll
index 250d3ef..837c0d8 100644
--- a/llvm/test/CodeGen/Mips/dsp-patterns.ll
+++ b/llvm/test/CodeGen/Mips/dsp-patterns.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dsp < %s | FileCheck %s -check-prefix=R1
-; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
+; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=R1
+; RUN: llc -march=mips -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
 
 ; R1-LABEL: test_lbux:
 ; R1: lbux ${{[0-9]+}}
diff --git a/llvm/test/CodeGen/Mips/llcarry.ll b/llvm/test/CodeGen/Mips/llcarry.ll
index b7cc6fc..fcf1294 100644
--- a/llvm/test/CodeGen/Mips/llcarry.ll
+++ b/llvm/test/CodeGen/Mips/llcarry.ll
@@ -14,9 +14,9 @@
   %add = add nsw i64 %1, %0
   store i64 %add, i64* @k, align 8
 ; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
-; 16:	move	${{[0-9]+}}, $24
+; 16:	move	${{[0-9]+}}, $t8
+; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
   ret void
 }
@@ -28,8 +28,8 @@
   %sub = sub nsw i64 %0, %1
 ; 16:	subu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
-; 16:	move	${{[0-9]+}}, $24
-; 16:	subu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
+; 16:	move	${{[0-9]+}}, $t8
+; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	subu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
   store i64 %sub, i64* @l, align 8
   ret void
@@ -41,7 +41,8 @@
   %add = add nsw i64 %0, 15
 ; 16:	addiu	${{[0-9]+}}, 15
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
-; 16:	move	${{[0-9]+}}, $24
+; 16:	move	${{[0-9]+}}, $t8
+; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
   store i64 %add, i64* @m, align 8
   ret void
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/add.ll b/llvm/test/CodeGen/Mips/llvm-ir/add.ll
index 63884eb..a5ecdda 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/add.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/add.ll
@@ -1,35 +1,35 @@
 ; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP32,PRE4
+; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP32
 ; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP32,GP32-CMOV
+; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP32
 ; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN:    -check-prefixes=ALL,R2-R6,GP32
 ; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN:    -check-prefixes=ALL,R2-R6,GP32
 ; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN:    -check-prefixes=ALL,R2-R6,GP32
 ; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
 ; RUN:    -check-prefixes=ALL,R2-R6,GP32
 ; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP64
 ; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP64
 ; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN:    -check-prefixes=ALL,NOT-R2-R6,GP64
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN:    -check-prefixes=ALL,R2-R6,GP64
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN:    -check-prefixes=ALL,R2-R6,GP64
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN:    -check-prefixes=ALL,R2-R6,GP64
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN:    -check-prefixes=ALL,R2-R6,GP64
 ; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -O2 -verify-machineinstrs | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MMR3,MM32
+; RUN:    -check-prefixes=ALL,MMR6,MM32
 ; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -O2 | FileCheck %s \
 ; RUN:    -check-prefixes=ALL,MMR6,MM32
 ; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -O2 | FileCheck %s \
-; RUN:    -check-prefixes=ALL,MM64
+; RUN:    -check-prefixes=ALL,MMR6,MM64
 
 
 ; FIXME: This code sequence is inefficient as it should be 'subu $[[T0]], $zero, $[[T0]]'.
@@ -110,17 +110,17 @@
 entry:
 ; ALL-LABEL: add_i64:
 
-  ; GP32-DAG:   addu    $[[T0:[0-9]+]], $4, $6
-  ; GP32-DAG:   addu    $3, $5, $7
-  ; GP32:       sltu    $[[T1:[0-9]+]], $3, $5
-  ; GP32:       addu    $2, $[[T0]], $[[T1]]
+  ; GP32:       addu    $3, $5, $7
+  ; GP32:       sltu    $[[T0:[0-9]+]], $3, $7
+  ; GP32:       addu    $[[T1:[0-9]+]], $[[T0]], $6
+  ; GP32:       addu    $2, $4, $[[T1]]
 
   ; GP64:       daddu   $2, $4, $5
 
-  ; MM32-DAG:   addu16  $3, $5, $7
-  ; MM32-DAG:   addu16  $[[T0:[0-9]+]], $4, $6
-  ; MM32:       sltu    $[[T1:[0-9]+]], $3, $5
-  ; MM32:       addu16  $2, $[[T0]], $[[T1]]
+  ; MM32:       addu16  $3, $5, $7
+  ; MM32:       sltu    $[[T0:[0-9]+]], $3, $7
+  ; MM32:       addu    $[[T1:[0-9]+]], $[[T0]], $6
+  ; MM32:       addu    $2, $4, $[[T1]]
 
   ; MM64:       daddu   $2, $4, $5
 
@@ -132,108 +132,49 @@
 entry:
 ; ALL-LABEL: add_i128:
 
-  ; PRE4:       move    $[[R1:[0-9]+]], $5
-  ; PRE4:       move    $[[R2:[0-9]+]], $4
-  ; PRE4:       lw   $[[R3:[0-9]+]], 24($sp)
-  ; PRE4:       addu   $[[R4:[0-9]+]], $6, $[[R3]]
-  ; PRE4:       lw   $[[R5:[0-9]+]], 28($sp)
-  ; PRE4:       addu   $[[R6:[0-9]+]], $7, $[[R5]]
-  ; PRE4:       sltu   $[[R7:[0-9]+]], $[[R6]], $7
-  ; PRE4:       addu   $[[R8:[0-9]+]], $[[R4]], $[[R7]]
-  ; PRE4:       xor   $[[R9:[0-9]+]], $[[R8]], $6
-  ; PRE4:       sltiu   $[[R10:[0-9]+]], $[[R9]], 1
-  ; PRE4:       bnez   $[[R10]], $BB5_2
-  ; PRE4:       sltu   $[[R7]], $[[R8]], $6
-  ; PRE4:       lw   $[[R12:[0-9]+]], 20($sp)
-  ; PRE4:       addu   $[[R13:[0-9]+]], $[[R1]], $[[R12]]
-  ; PRE4:       lw   $[[R14:[0-9]+]], 16($sp)
-  ; PRE4:       addu   $[[R15:[0-9]+]], $[[R13]], $[[R7]]
-  ; PRE4:       addu   $[[R16:[0-9]+]], $[[R2]], $[[R14]]
-  ; PRE4:       sltu   $[[R17:[0-9]+]], $[[R15]], $[[R13]]
-  ; PRE4:       sltu   $[[R18:[0-9]+]], $[[R13]], $[[R1]]
-  ; PRE4:       addu   $[[R19:[0-9]+]], $[[R16]], $[[R18]]
-  ; PRE4:       addu   $2, $[[R19]], $[[R17]]
+  ; GP32:       lw        $[[T0:[0-9]+]], 28($sp)
+  ; GP32:       addu      $[[T1:[0-9]+]], $7, $[[T0]]
+  ; GP32:       sltu      $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+  ; GP32:       lw        $[[T3:[0-9]+]], 24($sp)
+  ; GP32:       addu      $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+  ; GP32:       addu      $[[T5:[0-9]+]], $6, $[[T4]]
+  ; GP32:       sltu      $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+  ; GP32:       lw        $[[T7:[0-9]+]], 20($sp)
+  ; GP32:       addu      $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+  ; GP32:       lw        $[[T9:[0-9]+]], 16($sp)
+  ; GP32:       addu      $3, $5, $[[T8]]
+  ; GP32:       sltu      $[[T10:[0-9]+]], $3, $[[T7]]
+  ; GP32:       addu      $[[T11:[0-9]+]], $[[T10]], $[[T9]]
+  ; GP32:       addu      $2, $4, $[[T11]]
+  ; GP32:       move      $4, $[[T5]]
+  ; GP32:       move      $5, $[[T1]]
 
-  ; GP32-CMOV:  lw        $[[T0:[0-9]+]], 24($sp)
-  ; GP32-CMOV:  addu      $[[T1:[0-9]+]], $6, $[[T0]]
-  ; GP32-CMOV:  lw        $[[T2:[0-9]+]], 28($sp)
-  ; GP32-CMOV:  addu      $[[T3:[0-9]+]], $7, $[[T2]]
-  ; GP32-CMOV:  sltu      $[[T4:[0-9]+]], $[[T3]], $7
-  ; GP32-CMOV:  addu      $[[T5:[0-9]+]], $[[T1]], $[[T4]]
-  ; GP32-CMOV:  sltu      $[[T6:[0-9]+]], $[[T5]], $6
-  ; GP32-CMOV:  xor       $[[T7:[0-9]+]], $[[T5]], $6
-  ; GP32-CMOV:  movz      $[[T8:[0-9]+]], $[[T4]], $[[T7]]
-  ; GP32-CMOV:  lw        $[[T9:[0-9]+]], 20($sp)
-  ; GP32-CMOV:  addu      $[[T10:[0-9]+]], $5, $[[T4]]
-  ; GP32-CMOV:  addu      $[[T11:[0-9]+]], $[[T10]], $[[T8]]
-  ; GP32-CMOV:  lw        $[[T12:[0-9]+]], 16($sp)
-  ; GP32-CMOV:  sltu      $[[T13:[0-9]+]], $[[T11]], $[[T10]]
-  ; GP32-CMOV:  addu      $[[T14:[0-9]+]], $4, $[[T12]]
-  ; GP32-CMOV:  sltu      $[[T15:[0-9]+]], $[[T10]], $5
-  ; GP32-CMOV:  addu      $[[T16:[0-9]+]], $[[T14]], $[[T15]]
-  ; GP32-CMOV:  addu      $[[T17:[0-9]+]], $[[T16]], $[[T13]]
-  ; GP32-CMOV:  move      $4, $[[T5]]
-  ; GP32-CMOV:  move      $5, $[[T3]]
+  ; GP64:       daddu     $3, $5, $7
+  ; GP64:       sltu      $[[T0:[0-9]+]], $3, $7
+  ; GP64:       daddu     $[[T1:[0-9]+]], $[[T0]], $6
+  ; GP64:       daddu     $2, $4, $[[T1]]
 
-  ; GP64:           daddu   $[[T0:[0-9]+]], $4, $6
-  ; GP64:           daddu   $[[T1:[0-9]+]], $5, $7
-  ; GP64:           sltu    $[[T2:[0-9]+]], $[[T1]], $5
-  ; GP64-NOT-R2-R6: dsll    $[[T3:[0-9]+]], $[[T2]], 32
-  ; GP64-NOT-R2-R6: dsrl    $[[T4:[0-9]+]], $[[T3]], 32
-  ; GP64-R2-R6:     dext    $[[T4:[0-9]+]], $[[T2]], 0, 32
+  ; MM32:       lw        $[[T0:[0-9]+]], 28($sp)
+  ; MM32:       addu      $[[T1:[0-9]+]], $7, $[[T0]]
+  ; MM32:       sltu      $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+  ; MM32:       lw        $[[T3:[0-9]+]], 24($sp)
+  ; MM32:       addu16    $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+  ; MM32:       addu16    $[[T5:[0-9]+]], $6, $[[T4]]
+  ; MM32:       sltu      $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+  ; MM32:       lw        $[[T7:[0-9]+]], 20($sp)
+  ; MM32:       addu16    $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+  ; MM32:       lw        $[[T9:[0-9]+]], 16($sp)
+  ; MM32:       addu16    $[[T10:[0-9]+]], $5, $[[T8]]
+  ; MM32:       sltu      $[[T11:[0-9]+]], $[[T10]], $[[T7]]
+  ; MM32:       addu      $[[T12:[0-9]+]], $[[T11]], $[[T9]]
+  ; MM32:       addu16    $[[T13:[0-9]+]], $4, $[[T12]]
+  ; MM32:       move      $4, $[[T5]]
+  ; MM32:       move      $5, $[[T1]]
 
-  ; GP64:           daddu   $2, $[[T0]], $[[T4]]
-
-  ; MMR3:       move      $[[T1:[0-9]+]], $5
-  ; MMR3-DAG:   lw        $[[T2:[0-9]+]], 32($sp)
-  ; MMR3:       addu16    $[[T3:[0-9]+]], $6, $[[T2]]
-  ; MMR3-DAG:   lw        $[[T4:[0-9]+]], 36($sp)
-  ; MMR3:       addu16    $[[T5:[0-9]+]], $7, $[[T4]]
-  ; MMR3:       sltu      $[[T6:[0-9]+]], $[[T5]], $7
-  ; MMR3:       addu16    $[[T7:[0-9]+]], $[[T3]], $[[T6]]
-  ; MMR3:       sltu      $[[T8:[0-9]+]], $[[T7]], $6
-  ; MMR3:       xor       $[[T9:[0-9]+]], $[[T7]], $6
-  ; MMR3:       movz      $[[T8]], $[[T6]], $[[T9]]
-  ; MMR3:       lw        $[[T10:[0-9]+]], 28($sp)
-  ; MMR3:       addu16    $[[T11:[0-9]+]], $[[T1]], $[[T10]]
-  ; MMR3:       addu16    $[[T12:[0-9]+]], $[[T11]], $[[T8]]
-  ; MMR3:       lw        $[[T13:[0-9]+]], 24($sp)
-  ; MMR3:       sltu      $[[T14:[0-9]+]], $[[T12]], $[[T11]]
-  ; MMR3:       addu16    $[[T15:[0-9]+]], $4, $[[T13]]
-  ; MMR3:       sltu      $[[T16:[0-9]+]], $[[T11]], $[[T1]]
-  ; MMR3:       addu16    $[[T17:[0-9]+]], $[[T15]], $[[T16]]
-  ; MMR3:       addu16    $2, $2, $[[T14]]
-
-  ; MMR6:        move      $[[T1:[0-9]+]], $5
-  ; MMR6:        move      $[[T2:[0-9]+]], $4
-  ; MMR6:        lw        $[[T3:[0-9]+]], 32($sp)
-  ; MMR6:        addu16    $[[T4:[0-9]+]], $6, $[[T3]]
-  ; MMR6:        lw        $[[T5:[0-9]+]], 36($sp)
-  ; MMR6:        addu16    $[[T6:[0-9]+]], $7, $[[T5]]
-  ; MMR6:        sltu      $[[T7:[0-9]+]], $[[T6]], $7
-  ; MMR6:        addu16    $[[T8:[0-9]+]], $[[T4]], $7
-  ; MMR6:        sltu      $[[T9:[0-9]+]], $[[T8]], $6
-  ; MMR6:        xor       $[[T10:[0-9]+]], $[[T4]], $6
-  ; MMR6:        sltiu     $[[T11:[0-9]+]], $[[T10]], 1
-  ; MMR6:        seleqz    $[[T12:[0-9]+]], $[[T9]], $[[T11]]
-  ; MMR6:        selnez    $[[T13:[0-9]+]], $[[T7]], $[[T11]]
-  ; MMR6:        lw        $[[T14:[0-9]+]], 24($sp)
-  ; MMR6:        or        $[[T15:[0-9]+]], $[[T13]], $[[T12]]
-  ; MMR6:        addu16    $[[T16:[0-9]+]], $[[T2]], $[[T14]]
-  ; MMR6:        lw        $[[T17:[0-9]+]], 28($sp)
-  ; MMR6:        addu16    $[[T18:[0-9]+]], $[[T1]], $[[T17]]
-  ; MMR6:        addu16    $[[T19:[0-9]+]], $[[T18]], $[[T15]]
-  ; MMR6:        sltu      $[[T20:[0-9]+]], $[[T18]], $[[T1]]
-  ; MMR6:        sltu      $[[T21:[0-9]+]], $[[T17]], $[[T18]]
-  ; MMR6:        addu16    $2, $[[T16]], $[[T20]]
-  ; MMR6:        addu16    $2, $[[T20]], $[[T21]]
-
-  ; MM64:       daddu     $[[T0:[0-9]+]], $4, $6
   ; MM64:       daddu     $3, $5, $7
-  ; MM64:       sltu      $[[T1:[0-9]+]], $3, $5
-  ; MM64:       dsll      $[[T2:[0-9]+]], $[[T1]], 32
-  ; MM64:       dsrl      $[[T3:[0-9]+]], $[[T2]], 32
-  ; MM64:       daddu     $2, $[[T0]], $[[T3]]
+  ; MM64:       sltu      $[[T0:[0-9]+]], $3, $7
+  ; MM64:       daddu     $[[T1:[0-9]+]], $[[T0]], $6
+  ; MM64:       daddu     $2, $4, $[[T1]]
 
   %r = add i128 %a, %b
   ret i128 %r
@@ -308,16 +249,17 @@
 define signext i64 @add_i64_4(i64 signext %a) {
 ; ALL-LABEL: add_i64_4:
 
-  ; GP32:       addiu   $3, $5, 4
-  ; GP32:       sltu    $[[T0:[0-9]+]], $3, $5
-  ; GP32:       addu    $2, $4, $[[T0]]
-
-  ; MM32:       addiur2 $[[T1:[0-9]+]], $5, 4
-  ; MM32:       sltu    $[[T2:[0-9]+]], $[[T1]], $5
-  ; MM32:       addu16  $2, $4, $[[T2]]
+  ; GP32:       addiu   $[[T0:[0-9]+]], $5, 4
+  ; GP32:       addiu   $[[T1:[0-9]+]], $zero, 4
+  ; GP32:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; GP32:       addu    $2, $4, $[[T1]]
 
   ; GP64:       daddiu  $2, $4, 4
 
+  ; MM32:       addiu   $[[T0:[0-9]+]], $5, 4
+  ; MM32:       li16    $[[T1:[0-9]+]], 4
+  ; MM32:       sltu    $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+  ; MM32:       addu    $2, $4, $[[T2]]
 
   ; MM64:       daddiu  $2, $4, 4
 
@@ -328,67 +270,38 @@
 define signext i128 @add_i128_4(i128 signext %a) {
 ; ALL-LABEL: add_i128_4:
 
-  ; PRE4:       move   $[[T0:[0-9]+]], $5
-  ; PRE4:       addiu  $[[T1:[0-9]+]], $7, 4
-  ; PRE4:       sltu   $[[T2:[0-9]+]], $[[T1]], $7
-  ; PRE4:       xori   $[[T3:[0-9]+]], $[[T2]], 1
-  ; PRE4:       bnez   $[[T3]], $BB[[BB0:[0-9_]+]]
-  ; PRE4:       addu   $[[T4:[0-9]+]], $6, $[[T2]]
-  ; PRE4:       sltu   $[[T5:[0-9]+]], $[[T4]], $6
-  ; PRE4;       $BB[[BB0:[0-9]+]]:
-  ; PRE4:       addu   $[[T6:[0-9]+]], $[[T0]], $[[T5]]
-  ; PRE4:       sltu   $[[T7:[0-9]+]], $[[T6]], $[[T0]]
-  ; PRE4:       addu   $[[T8:[0-9]+]], $4, $[[T7]]
-  ; PRE4:       move    $4, $[[T4]]
+  ; GP32:       addiu   $[[T0:[0-9]+]], $7, 4
+  ; GP32:       addiu   $[[T1:[0-9]+]], $zero, 4
+  ; GP32:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; GP32:       addu    $[[T2:[0-9]+]], $6, $[[T1]]
+  ; GP32:       sltu    $[[T1]], $[[T2]], $zero
+  ; GP32:       addu    $[[T3:[0-9]+]], $5, $[[T1]]
+  ; GP32:       sltu    $[[T1]], $[[T3]], $zero
+  ; GP32:       addu    $[[T1]], $4, $[[T1]]
+  ; GP32:       move    $4, $[[T2]]
+  ; GP32:       move    $5, $[[T0]]
 
-  ; GP32-CMOV:  addiu   $[[T0:[0-9]+]], $7, 4
-  ; GP32-CMOV:  sltu    $[[T1:[0-9]+]], $[[T0]], $7
-  ; GP32-CMOV:  addu    $[[T2:[0-9]+]], $6, $[[T1]]
-  ; GP32-CMOV:  sltu    $[[T3:[0-9]+]], $[[T2]], $6
-  ; GP32-CMOV:  movz    $[[T3]], $[[T1]], $[[T1]]
-  ; GP32-CMOV:  addu    $[[T4:[0-9]+]], $5, $[[T3]]
-  ; GP32-CMOV:  sltu    $[[T5:[0-9]+]], $[[T4]], $5
-  ; GP32-CMOV:  addu    $[[T7:[0-9]+]], $4, $[[T5]]
-  ; GP32-CMOV:  move    $4, $[[T2]]
-  ; GP32-CMOV:  move    $5, $[[T0]]
+  ; GP64:       daddiu  $[[T0:[0-9]+]], $5, 4
+  ; GP64:       daddiu  $[[T1:[0-9]+]], $zero, 4
+  ; GP64:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; GP64:       daddu   $2, $4, $[[T1]]
 
-  ; GP64:           daddiu  $[[T0:[0-9]+]], $5, 4
-  ; GP64:           sltu    $[[T1:[0-9]+]], $[[T0]], $5
-  ; GP64-NOT-R2-R6: dsll    $[[T2:[0-9]+]], $[[T1]], 32
-  ; GP64-NOT-R2-R6: dsrl    $[[T3:[0-9]+]], $[[T2]], 32
-  ; GP64-R2-R6:     dext    $[[T3:[0-9]+]], $[[T1]], 0, 32
-
-  ; GP64:           daddu   $2, $4, $[[T3]]
-
-  ; MMR3:       addiur2 $[[T0:[0-9]+]], $7, 4
-  ; MMR3:       sltu    $[[T1:[0-9]+]], $[[T0]], $7
-  ; MMR3:       sltu    $[[T2:[0-9]+]], $[[T0]], $7
-  ; MMR3:       addu16  $[[T3:[0-9]+]], $6, $[[T2]]
-  ; MMR3:       sltu    $[[T4:[0-9]+]], $[[T3]], $6
-  ; MMR3:       movz    $[[T4]], $[[T2]], $[[T1]]
-  ; MMR3:       addu16  $[[T6:[0-9]+]], $5, $[[T4]]
-  ; MMR3:       sltu    $[[T7:[0-9]+]], $[[T6]], $5
-  ; MMR3:       addu16  $2, $4, $[[T7]]
-
-  ; MMR6: addiur2 $[[T1:[0-9]+]], $7, 4
-  ; MMR6: sltu    $[[T2:[0-9]+]], $[[T1]], $7
-  ; MMR6: xori    $[[T3:[0-9]+]], $[[T2]], 1
-  ; MMR6: selnez  $[[T4:[0-9]+]], $[[T2]], $[[T3]]
-  ; MMR6: addu16  $[[T5:[0-9]+]], $6, $[[T2]]
-  ; MMR6: sltu    $[[T6:[0-9]+]], $[[T5]], $6
-  ; MMR6: seleqz  $[[T7:[0-9]+]], $[[T6]], $[[T3]]
-  ; MMR6: or      $[[T8:[0-9]+]], $[[T4]], $[[T7]]
-  ; MMR6: addu16  $[[T9:[0-9]+]], $5, $[[T8]]
-  ; MMR6: sltu    $[[T10:[0-9]+]], $[[T9]], $5
-  ; MMR6: addu16  $[[T11:[0-9]+]], $4, $[[T10]]
-  ; MMR6: move    $4, $7
-  ; MMR6: move    $5, $[[T1]]
+  ; MM32:       addiu   $[[T0:[0-9]+]], $7, 4
+  ; MM32:       li16    $[[T1:[0-9]+]], 4
+  ; MM32:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; MM32:       addu16  $[[T2:[0-9]+]], $6, $[[T1]]
+  ; MM32:       li16    $[[T1]], 0
+  ; MM32:       sltu    $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+  ; MM32:       addu16  $[[T3]], $5, $[[T3]]
+  ; MM32:       sltu    $[[T1]], $[[T3]], $[[T1]]
+  ; MM32:       addu16  $[[T1]], $4, $[[T1]]
+  ; MM32:       move    $4, $[[T2]]
+  ; MM32:       move    $5, $[[T0]]
 
   ; MM64:       daddiu  $[[T0:[0-9]+]], $5, 4
-  ; MM64:       sltu    $[[T1:[0-9]+]], $[[T0]], $5
-  ; MM64:       dsll    $[[T2:[0-9]+]], $[[T1]], 32
-  ; MM64:       dsrl    $[[T3:[0-9]+]], $[[T2]], 32
-  ; MM64:       daddu   $2, $4, $[[T3]]
+  ; MM64:       daddiu  $[[T1:[0-9]+]], $zero, 4
+  ; MM64:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; MM64:       daddu   $2, $4, $[[T1]]
 
   %r = add i128 4, %a
   ret i128 %r
@@ -467,15 +380,16 @@
 ; ALL-LABEL: add_i64_3:
 
   ; GP32:       addiu   $[[T0:[0-9]+]], $5, 3
-  ; GP32:       sltu    $[[T1:[0-9]+]], $[[T0]], $5
+  ; GP32:       addiu   $[[T1:[0-9]+]], $zero, 3
+  ; GP32:       sltu    $[[T1]], $[[T0]], $[[T1]]
   ; GP32:       addu    $2, $4, $[[T1]]
 
   ; GP64:       daddiu  $2, $4, 3
 
-  ; MM32:       move    $[[T1:[0-9]+]], $5
-  ; MM32:       addius5 $[[T1]], 3
-  ; MM32:       sltu    $[[T2:[0-9]+]], $[[T1]], $5
-  ; MM32:       addu16  $2, $4, $[[T2]]
+  ; MM32:       addiu   $[[T0:[0-9]+]], $5, 3
+  ; MM32:       li16    $[[T1:[0-9]+]], 3
+  ; MM32:       sltu    $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+  ; MM32:       addu    $2, $4, $[[T2]]
 
   ; MM64:       daddiu  $2, $4, 3
 
@@ -486,70 +400,38 @@
 define signext i128 @add_i128_3(i128 signext %a) {
 ; ALL-LABEL: add_i128_3:
 
-  ; PRE4:       move   $[[T0:[0-9]+]], $5
-  ; PRE4:       addiu  $[[T1:[0-9]+]], $7, 3
-  ; PRE4:       sltu   $[[T2:[0-9]+]], $[[T1]], $7
-  ; PRE4:       xori   $[[T3:[0-9]+]], $[[T2]], 1
-  ; PRE4:       bnez   $[[T3]], $BB[[BB0:[0-9_]+]]
-  ; PRE4:       addu   $[[T4:[0-9]+]], $6, $[[T2]]
-  ; PRE4:       sltu   $[[T5:[0-9]+]], $[[T4]], $6
-  ; PRE4;       $BB[[BB0:[0-9]+]]:
-  ; PRE4:       addu   $[[T6:[0-9]+]], $[[T0]], $[[T5]]
-  ; PRE4:       sltu   $[[T7:[0-9]+]], $[[T6]], $[[T0]]
-  ; PRE4:       addu   $[[T8:[0-9]+]], $4, $[[T7]]
-  ; PRE4:       move    $4, $[[T4]]
+  ; GP32:       addiu   $[[T0:[0-9]+]], $7, 3
+  ; GP32:       addiu   $[[T1:[0-9]+]], $zero, 3
+  ; GP32:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; GP32:       addu    $[[T2:[0-9]+]], $6, $[[T1]]
+  ; GP32:       sltu    $[[T3:[0-9]+]], $[[T2]], $zero
+  ; GP32:       addu    $[[T4:[0-9]+]], $5, $[[T3]]
+  ; GP32:       sltu    $[[T5:[0-9]+]], $[[T4]], $zero
+  ; GP32:       addu    $[[T5]], $4, $[[T5]]
+  ; GP32:       move    $4, $[[T2]]
+  ; GP32:       move    $5, $[[T0]]
 
-  ; GP32-CMOV:  addiu   $[[T0:[0-9]+]], $7, 3
-  ; GP32-CMOV:  sltu    $[[T1:[0-9]+]], $[[T0]], $7
-  ; GP32-CMOV:  addu    $[[T2:[0-9]+]], $6, $[[T1]]
-  ; GP32-CMOV:  sltu    $[[T3:[0-9]+]], $[[T2]], $6
-  ; GP32-CMOV:  movz    $[[T3]], $[[T1]], $[[T1]]
-  ; GP32-CMOV:  addu    $[[T4:[0-9]+]], $5, $[[T3]]
-  ; GP32-CMOV:  sltu    $[[T5:[0-9]+]], $[[T4]], $5
-  ; GP32-CMOV:  addu    $[[T7:[0-9]+]], $4, $[[T5]]
-  ; GP32-CMOV:  move    $4, $[[T2]]
-  ; GP32-CMOV:  move    $5, $[[T0]]
+  ; GP64:       daddiu  $[[T0:[0-9]+]], $5, 3
+  ; GP64:       daddiu  $[[T1:[0-9]+]], $zero, 3
+  ; GP64:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; GP64:       daddu   $2, $4, $[[T1]]
 
-  ; GP64:           daddiu  $[[T0:[0-9]+]], $5, 3
-  ; GP64:           sltu    $[[T1:[0-9]+]], $[[T0]], $5
-
-  ; GP64-NOT-R2-R6: dsll    $[[T2:[0-9]+]], $[[T1]], 32
-  ; GP64-NOT-R2-R6: dsrl    $[[T3:[0-9]+]], $[[T2]], 32
-  ; GP64-R2-R6:     dext    $[[T3:[0-9]+]], $[[T1]], 0, 32
-
-  ; GP64:           daddu   $2, $4, $[[T3]]
-
-  ; MMR3:       move    $[[T1:[0-9]+]], $7
-  ; MMR3:       addius5 $[[T1]], 3
-  ; MMR3:       sltu    $[[T2:[0-9]+]], $[[T1]], $7
-  ; MMR3:       sltu    $[[T3:[0-9]+]], $[[T1]], $7
-  ; MMR3:       addu16  $[[T4:[0-9]+]], $6, $[[T3]]
-  ; MMR3:       sltu    $[[T5:[0-9]+]], $[[T4]], $6
-  ; MMR3:       movz    $[[T5]], $[[T3]], $[[T2]]
-  ; MMR3:       addu16  $[[T6:[0-9]+]], $5, $[[T5]]
-  ; MMR3:       sltu    $[[T7:[0-9]+]], $[[T6]], $5
-  ; MMR3:       addu16  $2, $4, $[[T7]]
-
-  ; MMR6: move    $[[T1:[0-9]+]], $7
-  ; MMR6: addius5 $[[T1]], 3
-  ; MMR6: sltu    $[[T2:[0-9]+]], $[[T1]], $7
-  ; MMR6: xori    $[[T3:[0-9]+]], $[[T2]], 1
-  ; MMR6: selnez  $[[T4:[0-9]+]], $[[T2]], $[[T3]]
-  ; MMR6: addu16  $[[T5:[0-9]+]], $6, $[[T2]]
-  ; MMR6: sltu    $[[T6:[0-9]+]], $[[T5]], $6
-  ; MMR6: seleqz  $[[T7:[0-9]+]], $[[T6]], $[[T3]]
-  ; MMR6: or      $[[T8:[0-9]+]], $[[T4]], $[[T7]]
-  ; MMR6: addu16  $[[T9:[0-9]+]], $5, $[[T8]]
-  ; MMR6: sltu    $[[T10:[0-9]+]], $[[T9]], $5
-  ; MMR6: addu16  $[[T11:[0-9]+]], $4, $[[T10]]
-  ; MMR6: move    $4, $[[T5]]
-  ; MMR6: move    $5, $[[T1]]
+  ; MM32:       addiu   $[[T0:[0-9]+]], $7, 3
+  ; MM32:       li16    $[[T1:[0-9]+]], 3
+  ; MM32:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; MM32:       addu16  $[[T2:[0-9]+]], $6, $[[T1]]
+  ; MM32:       li16    $[[T3:[0-9]+]], 0
+  ; MM32:       sltu    $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+  ; MM32:       addu16  $[[T4]], $5, $[[T4]]
+  ; MM32:       sltu    $[[T5:[0-9]+]], $[[T4]], $[[T3]]
+  ; MM32:       addu16  $[[T5]], $4, $[[T5]]
+  ; MM32:       move    $4, $[[T2]]
+  ; MM32:       move    $5, $[[T0]]
 
   ; MM64:       daddiu  $[[T0:[0-9]+]], $5, 3
-  ; MM64:       sltu    $[[T1:[0-9]+]], $[[T0]], $5
-  ; MM64:       dsll    $[[T2:[0-9]+]], $[[T1]], 32
-  ; MM64:       dsrl    $[[T3:[0-9]+]], $[[T2]], 32
-  ; MM64:       daddu   $2, $4, $[[T3]]
+  ; MM64:       daddiu  $[[T1:[0-9]+]], $zero, 3
+  ; MM64:       sltu    $[[T1]], $[[T0]], $[[T1]]
+  ; MM64:       daddu   $2, $4, $[[T1]]
 
   %r = add i128 3, %a
   ret i128 %r
diff --git a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
index 655addb..a730063 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN:    -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM,PRE4
+; RUN:    -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
 ; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
 ; RUN:    -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
 ; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
@@ -11,25 +11,25 @@
 ; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
 ; RUN:    -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
 ; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -verify-machineinstrs | FileCheck %s \
-; RUN:    -check-prefixes=GP32-MM,GP32,MM32,MMR3
+; RUN:    -check-prefixes=GP32-MM,GP32,MM
 ; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
-; RUN:    -check-prefixes=GP32-MM,GP32,MM32,MMR6
+; RUN:    -check-prefixes=GP32-MM,GP32,MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN:    -check-prefixes=NOT-R2-R6,GP64,NOT-MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN:    -check-prefixes=R2-R6,GP64,NOT-MM
 ; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
-; RUN:    -check-prefixes=GP64,MM64
+; RUN:    -check-prefixes=GP64,MM
 
 define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
 entry:
@@ -100,15 +100,10 @@
 entry:
 ; ALL-LABEL: sub_i64:
 
-  ; GP32-NOT-MM:    sltu    $[[T0:[0-9]+]], $5, $7
-  ; GP32-NOT-MM:    subu    $2, $4, $6
-  ; GP32-NOT-MM:    subu    $2, $2, $[[T0]]
-  ; GP32-NOT-MM:    subu    $3, $5, $7
-
-  ; MM32:           sltu    $[[T0:[0-9]+]], $5, $7
-  ; MM32:           subu16    $3, $4, $6
-  ; MM32:           subu16    $2, $3, $[[T0]]
-  ; MM32:           subu16    $3, $5, $7
+  ; GP32-NOT-MM     subu    $3, $5, $7
+  ; GP32:           sltu    $[[T0:[0-9]+]], $5, $7
+  ; GP32:           addu    $[[T1:[0-9]+]], $[[T0]], $6
+  ; GP32:           subu    $2, $4, $[[T1]]
 
   ; GP64:           dsubu   $2, $4, $5
 
@@ -120,109 +115,42 @@
 entry:
 ; ALL-LABEL: sub_i128:
 
-; PRE4: lw     $[[T0:[0-9]+]], 24($sp)
-; PRE4: lw     $[[T1:[0-9]+]], 28($sp)
-; PRE4: sltu   $[[T2:[0-9]+]], $7, $[[T1]]
-; PRE4: xor    $[[T3:[0-9]+]], $6, $[[T0]]
-; PRE4: sltiu  $[[T4:[0-9]+]], $[[T3]], 1
-; PRE4: bnez   $[[T4]]
-; PRE4: move   $[[T5:[0-9]+]], $[[T2]]
-; PRE4: sltu   $[[T5]], $6, $[[T0]]
+  ; GP32-NOT-MM:    lw        $[[T0:[0-9]+]], 20($sp)
+  ; GP32-NOT-MM:    sltu      $[[T1:[0-9]+]], $5, $[[T0]]
+  ; GP32-NOT-MM:    lw        $[[T2:[0-9]+]], 16($sp)
+  ; GP32-NOT-MM:    addu      $[[T3:[0-9]+]], $[[T1]], $[[T2]]
+  ; GP32-NOT-MM:    lw        $[[T4:[0-9]+]], 24($sp)
+  ; GP32-NOT-MM:    lw        $[[T5:[0-9]+]], 28($sp)
+  ; GP32-NOT-MM:    subu      $[[T6:[0-9]+]], $7, $[[T5]]
+  ; GP32-NOT-MM:    subu      $2, $4, $[[T3]]
+  ; GP32-NOT-MM:    sltu      $[[T8:[0-9]+]], $6, $[[T4]]
+  ; GP32-NOT-MM:    addu      $[[T9:[0-9]+]], $[[T8]], $[[T0]]
+  ; GP32-NOT-MM:    subu      $3, $5, $[[T9]]
+  ; GP32-NOT-MM:    sltu      $[[T10:[0-9]+]], $7, $[[T5]]
+  ; GP32-NOT-MM:    addu      $[[T11:[0-9]+]], $[[T10]], $[[T4]]
+  ; GP32-NOT-MM:    subu      $4, $6, $[[T11]]
+  ; GP32-NOT-MM:    move      $5, $[[T6]]
 
-; PRE4: lw     $[[T6:[0-9]+]], 20($sp)
-; PRE4: subu   $[[T7:[0-9]+]], $5, $[[T6]]
-; PRE4: subu   $[[T8:[0-9]+]], $[[T7]], $[[T5]]
-; PRE4: sltu   $[[T9:[0-9]+]], $[[T7]], $[[T5]]
-; PRE4: sltu   $[[T10:[0-9]+]], $5, $[[T6]]
-; PRE4: lw     $[[T11:[0-9]+]], 16($sp)
-; PRE4: subu   $[[T12:[0-9]+]], $4, $[[T11]]
-; PRE4: subu   $[[T13:[0-9]+]], $[[T12]], $[[T10]]
-; PRE4: subu   $[[T14:[0-9]+]], $[[T13]], $[[T9]]
-; PRE4: subu   $[[T15:[0-9]+]], $6, $[[T0]]
-; PRE4: subu   $[[T16:[0-9]+]], $[[T15]], $[[T2]]
-; PRE4: subu   $5, $7, $[[T1]]
+  ; GP32-MM:        lw        $[[T0:[0-9]+]], 20($sp)
+  ; GP32-MM:        sltu      $[[T1:[0-9]+]], $[[T2:[0-9]+]], $[[T0]]
+  ; GP32-MM:        lw        $[[T3:[0-9]+]], 16($sp)
+  ; GP32-MM:        addu      $[[T3]], $[[T1]], $[[T3]]
+  ; GP32-MM:        lw        $[[T4:[0-9]+]], 24($sp)
+  ; GP32-MM:        lw        $[[T5:[0-9]+]], 28($sp)
+  ; GP32-MM:        subu      $[[T1]], $7, $[[T5]]
+  ; GP32-MM:        subu16    $[[T3]], $[[T6:[0-9]+]], $[[T3]]
+  ; GP32-MM:        sltu      $[[T6]], $6, $[[T4]]
+  ; GP32-MM:        addu16    $[[T0]], $[[T6]], $[[T0]]
+  ; GP32-MM:        subu16    $[[T0]], $5, $[[T0]]
+  ; GP32-MM:        sltu      $[[T6]], $7, $[[T5]]
+  ; GP32-MM:        addu      $[[T6]], $[[T6]], $[[T4]]
+  ; GP32-MM:        subu16    $[[T6]], $6, $[[T6]]
+  ; GP32-MM:        move      $[[T2]], $[[T1]]
 
-; MMR3: lw       $[[T1:[0-9]+]], 48($sp)
-; MMR3: sltu     $[[T2:[0-9]+]], $6, $[[T1]]
-; MMR3: xor      $[[T3:[0-9]+]], $6, $[[T1]]
-; MMR3: lw       $[[T4:[0-9]+]], 52($sp)
-; MMR3: sltu     $[[T5:[0-9]+]], $7, $[[T4]]
-; MMR3: movz     $[[T6:[0-9]+]], $[[T5]], $[[T3]]
-; MMR3: lw       $[[T7:[0-8]+]], 44($sp)
-; MMR3: subu16   $[[T8:[0-9]+]], $5, $[[T7]]
-; MMR3: subu16   $[[T9:[0-9]+]], $[[T8]], $[[T6]]
-; MMR3: sltu     $[[T10:[0-9]+]], $[[T8]], $[[T2]]
-; MMR3: sltu     $[[T11:[0-9]+]], $5, $[[T7]]
-; MMR3: lw       $[[T12:[0-9]+]], 40($sp)
-; MMR3: lw       $[[T13:[0-9]+]], 12($sp)
-; MMR3: subu16   $[[T14:[0-9]+]], $[[T13]], $[[T12]]
-; MMR3: subu16   $[[T15:[0-9]+]], $[[T14]], $[[T11]]
-; MMR3: subu16   $[[T16:[0-9]+]], $[[T15]], $[[T10]]
-; MMR3: subu16   $[[T17:[0-9]+]], $6, $[[T1]]
-; MMR3: subu16   $[[T18:[0-9]+]], $[[T17]], $7
-; MMR3: lw       $[[T19:[0-9]+]], 8($sp)
-; MMR3: lw       $[[T20:[0-9]+]], 0($sp)
-; MMR3: subu16   $5, $[[T19]], $[[T20]]
-
-; MMR6: move     $[[T0:[0-9]+]], $7
-; MMR6: sw       $[[T0]], 8($sp)
-; MMR6: move     $[[T1:[0-9]+]], $5
-; MMR6: sw       $4, 12($sp)
-; MMR6: lw       $[[T2:[0-9]+]], 48($sp)
-; MMR6: sltu     $[[T3:[0-9]+]], $6, $[[T2]]
-; MMR6: xor      $[[T4:[0-9]+]], $6, $[[T2]]
-; MMR6: sltiu    $[[T5:[0-9]+]], $[[T4]], 1
-; MMR6: seleqz   $[[T6:[0-9]+]], $[[T3]], $[[T5]]
-; MMR6: lw       $[[T7:[0-9]+]], 52($sp)
-; MMR6: sltu     $[[T8:[0-9]+]], $[[T0]], $[[T7]]
-; MMR6: selnez   $[[T9:[0-9]+]], $[[T8]], $[[T5]]
-; MMR6: or       $[[T10:[0-9]+]], $[[T9]], $[[T6]]
-; MMR6: lw       $[[T11:[0-9]+]], 44($sp)
-; MMR6: subu16   $[[T12:[0-9]+]], $[[T1]], $[[T11]]
-; MMR6: subu16   $[[T13:[0-9]+]], $[[T12]], $[[T7]]
-; MMR6: sltu     $[[T16:[0-9]+]], $[[T12]], $[[T7]]
-; MMR6: sltu     $[[T17:[0-9]+]], $[[T1]], $[[T11]]
-; MMR6: lw       $[[T18:[0-9]+]], 40($sp)
-; MMR6: lw       $[[T19:[0-9]+]], 12($sp)
-; MMR6: subu16   $[[T20:[0-9]+]], $[[T19]], $[[T18]]
-; MMR6: subu16   $[[T21:[0-9]+]], $[[T20]], $[[T17]]
-; MMR6: subu16   $[[T22:[0-9]+]], $[[T21]], $[[T16]]
-; MMR6: subu16   $[[T23:[0-9]+]], $6, $[[T2]]
-; MMR6: subu16   $4, $[[T23]], $5
-; MMR6: lw       $[[T24:[0-9]+]], 8($sp)
-; MMR6: lw       $[[T25:[0-9]+]], 0($sp)
-; MMR6: subu16   $5, $[[T24]], $[[T25]]
-; MMR6: lw       $3, 4($sp)
-
-; FIXME: The sltu, dsll, dsrl pattern here occurs when an i32 is zero
-;        extended to 64 bits. Fortunately slt(i)(u) actually gives an i1.
-;        These should be combined away.
-
-; GP64-NOT-R2: dsubu     $1, $4, $6
-; GP64-NOT-R2: sltu      $[[T0:[0-9]+]], $5, $7
-; GP64-NOT-R2: dsll      $[[T1:[0-9]+]], $[[T0]], 32
-; GP64-NOT-R2: dsrl      $[[T2:[0-9]+]], $[[T1]], 32
-; GP64-NOT-R2: dsubu     $2, $1, $[[T2]]
-; GP64-NOT-R2: dsubu     $3, $5, $7
-
-; FIXME: Likewise for the sltu, dext here.
-
-; GP64-R2:     dsubu     $1, $4, $6
-; GP64-R2:     sltu      $[[T0:[0-9]+]], $5, $7
-; GP64-R2:     dext      $[[T1:[0-9]+]], $[[T0]], 0, 32
-; GP64-R2:     dsubu     $2, $1, $[[T1]]
-; GP64-R2:     dsubu     $3, $5, $7
-
-; FIXME: Again, redundant sign extension. Also, microMIPSR6 has the
-;        dext instruction which should be used here.
-
-; MM64: dsubu   $[[T0:[0-9]+]], $4, $6
-; MM64: sltu    $[[T1:[0-9]+]], $5, $7
-; MM64: dsll    $[[T2:[0-9]+]], $[[T1]], 32
-; MM64: dsrl    $[[T3:[0-9]+]], $[[T2]], 32
-; MM64: dsubu   $2, $[[T0]], $[[T3]]
-; MM64: dsubu   $3, $5, $7
-; MM64: jr      $ra
+  ; GP64:           dsubu     $3, $5, $7
+  ; GP64:           sltu      $[[T0:[0-9]+]], $5, $7
+  ; GP64:           daddu     $[[T1:[0-9]+]], $[[T0]], $6
+  ; GP64:           dsubu     $2, $4, $[[T1]]
 
   %r = sub i128 %a, %b
   ret i128 %r
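
The restored GP64 checks above apply the same scheme to subtraction: the
borrow out of the low limb is an unsigned compare of the original operands,
folded into the high limb through the subtrahend. A hedged sketch
(assumptions: N64 ABI, i128 %a in $4:$5 and %b in $6:$7 as hi:lo pairs):

define i128 @sub128(i128 %a, i128 %b) {
entry:
; Per the restored GP64 checks (illustrative only):
;   dsubu $3, $5, $7            ; lo = a.lo - b.lo
;   sltu  $[[T0]], $5, $7       ; borrow = (a.lo < b.lo)
;   daddu $[[T1]], $[[T0]], $6  ; b.hi + borrow
;   dsubu $2, $4, $[[T1]]       ; hi = a.hi - (b.hi + borrow)
  %d = sub i128 %a, %b
  ret i128 %d
}
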
diff --git a/llvm/test/CodeGen/Mips/madd-msub.ll b/llvm/test/CodeGen/Mips/madd-msub.ll
index 3e1a2e8..7baba00 100644
--- a/llvm/test/CodeGen/Mips/madd-msub.ll
+++ b/llvm/test/CodeGen/Mips/madd-msub.ll
@@ -25,11 +25,11 @@
 
 ; 32R6-DAG:      mul  $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
 ; 32R6-DAG:      addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG:      sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-; 32R6-DAG:      muh  $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      sra  $[[T4:[0-9]+]], $6, 31
-; 32R6-DAG:      addu $[[T5:[0-9]+]], $[[T3]], $[[T4]]
-; 32R6-DAG:      addu $2, $[[T5]], $[[T2]]
+; 32R6-DAG:      sltu $[[T2:[0-9]+]], $[[T1]], $6
+; 32R6-DAG:      sra  $[[T3:[0-9]+]], $6, 31
+; 32R6-DAG:      addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+; 32R6-DAG:      muh  $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG:      addu $2, $[[T5]], $[[T4]]
 
 ; 64-DAG:        sll $[[T0:[0-9]+]], $4, 0
 ; 64-DAG:        sll $[[T1:[0-9]+]], $5, 0
@@ -71,7 +71,7 @@
 
 ; 32R6-DAG:      mul  $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
 ; 32R6-DAG:      addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG:      sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG:      sltu $[[T2:[0-9]+]], $[[T1]], $6
 ; FIXME: There's a redundant move here. We should remove it
 ; 32R6-DAG:      muhu $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
 ; 32R6-DAG:      addu $2, $[[T3]], $[[T2]]
@@ -109,10 +109,10 @@
 
 ; 32R6-DAG:      mul  $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
 ; 32R6-DAG:      addu $[[T1:[0-9]+]], $[[T0]], $7
-; 32R6-DAG:      sltu $[[T2:[0-9]+]], $[[T1]], $1
-; 32R6-DAG:      muh  $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      addu $[[T4:[0-9]+]], $[[T3]], $6
-; 32R6-DAG:      addu $2, $[[T4]], $[[T2]]
+; 32R6-DAG:      sltu $[[T2:[0-9]+]], $[[T1]], $7
+; 32R6-DAG:      addu $[[T4:[0-9]+]], $[[T2]], $6
+; 32R6-DAG:      muh  $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG:      addu $2, $[[T5]], $[[T4]]
 
 ; 64-DAG:        sll $[[T0:[0-9]+]], $4, 0
 ; 64-DAG:        sll $[[T1:[0-9]+]], $5, 0
@@ -134,17 +134,6 @@
   ret i64 %add
 }
 
-; ALL-LABEL: madd4
-; ALL-NOT: madd ${{[0-9]+}}, ${{[0-9]+}}
-
-define i32 @madd4(i32 %a, i32 %b, i32 %c) {
-entry:
-  %mul = mul nsw i32 %a, %b
-  %add = add nsw i32 %c, %mul
-
-  ret i32 %add
-}
-
 ; ALL-LABEL: msub1:
 
 ; 32-DAG:        sra $[[T0:[0-9]+]], $6, 31
@@ -159,13 +148,13 @@
 ; DSP-DAG:       mfhi $2, $[[AC]]
 ; DSP-DAG:       mflo $3, $[[AC]]
 
-; 32R6-DAG:      mul  $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      sltu $[[T1:[0-9]+]], $6, $[[T0]]
-; 32R6-DAG:      muh  $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      sra  $[[T3:[0-9]+]], $6, 31
-; 32R6-DAG:      subu $[[T4:[0-9]+]], $[[T3]], $[[T2]]
-; 32R6-DAG:      subu $2, $[[T4]], $[[T1]]
-; 32R6-DAG:      subu $3, $6, $[[T0]]
+; 32R6-DAG:      muh  $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG:      mul  $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG:      sltu $[[T3:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG:      addu $[[T4:[0-9]+]], $[[T3]], $[[T0]]
+; 32R6-DAG:      sra  $[[T5:[0-9]+]], $6, 31
+; 32R6-DAG:      subu $2, $[[T5]], $[[T4]]
+; 32R6-DAG:      subu $3, $6, $[[T1]]
 
 ; 64-DAG:        sll $[[T0:[0-9]+]], $4, 0
 ; 64-DAG:        sll $[[T1:[0-9]+]], $5, 0
@@ -205,12 +194,13 @@
 ; DSP-DAG:       mfhi $2, $[[AC]]
 ; DSP-DAG:       mflo $3, $[[AC]]
 
-; 32R6-DAG:      mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      sltu $[[T1:[0-9]+]], $6, $[[T0]]
-; 32R6-DAG:      muhu $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      negu $[[T3:[0-9]+]], $[[T2]]
-; 32R6-DAG:      subu $2, $[[T3]], $[[T1]]
-; 32R6-DAG:      subu $3, $6, $[[T0]]
+; 32R6-DAG:      muhu $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG:      mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+
+; 32R6-DAG:      sltu $[[T2:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG:      addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG:      negu $2, $[[T3]]
+; 32R6-DAG:      subu $3, $6, $[[T1]]
 
 ; 64-DAG:        d[[m:m]]ult $5, $4
 ; 64-DAG:        [[m]]flo $[[T0:[0-9]+]]
@@ -244,12 +234,12 @@
 ; DSP-DAG:       mfhi $2, $[[AC]]
 ; DSP-DAG:       mflo $3, $[[AC]]
 
-; 32R6-DAG:      mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      sltu $[[T1:[0-9]+]], $7, $[[T0]]
-; 32R6-DAG:      muh $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG:      subu $[[T3:[0-9]+]], $6, $[[T2]]
-; 32R6-DAG:      subu $2, $[[T3]], $[[T1]]
-; 32R6-DAG:      subu $3, $7, $[[T0]]
+; 32R6-DAG:      muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG:      mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG:      sltu $[[T2:[0-9]+]], $7, $[[T1]]
+; 32R6-DAG:      addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG:      subu $2, $6, $[[T3]]
+; 32R6-DAG:      subu $3, $7, $[[T1]]
 
 ; 64-DAG:        sll $[[T0:[0-9]+]], $4, 0
 ; 64-DAG:        sll $[[T1:[0-9]+]], $5, 0
@@ -270,14 +260,3 @@
   %sub = sub nsw i64 %c, %mul
   ret i64 %sub
 }
-
-; ALL-LABEL: msub4
-; ALL-NOT: msub ${{[0-9]+}}, ${{[0-9]+}}
-
-define i32 @msub4(i32 %a, i32 %b, i32 %c) {
-entry:
-  %mul = mul nsw i32 %a, %b
-  %sub = sub nsw i32 %c, %mul
-
-  ret i32 %sub
-}
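
The madd-msub.ll changes are the same idea applied to multiply-accumulate on
MIPS32R6, which has no HI/LO accumulator: the product is formed with mul/muh
and the carry between the halves is again an sltu. A sketch mirroring the
restored madd3 checks (assumptions: O32, %a in $4, %b in $5, %c in $6:$7 as
hi:lo; temporaries are illustrative):

define i64 @madd3(i32 signext %a, i32 signext %b, i64 %c) {
entry:
; Restored 32R6 sequence (a sketch, not authoritative):
;   mul  $[[T0]], $4, $5        ; low half of the product
;   addu $[[T1]], $[[T0]], $7   ; lo = product.lo + c.lo
;   sltu $[[T2]], $[[T1]], $7   ; carry out of the low-word add
;   addu $[[T4]], $[[T2]], $6   ; c.hi + carry
;   muh  $[[T5]], $4, $5        ; high half of the product
;   addu $2, $[[T5]], $[[T4]]   ; hi = product.hi + c.hi + carry
  %as = sext i32 %a to i64
  %bs = sext i32 %b to i64
  %mul = mul nsw i64 %as, %bs
  %add = add nsw i64 %c, %mul
  ret i64 %add
}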