AArch64/ARM64: use HS instead of CS & LO instead of CC.

On instructions using the NZCV register, two conditions have dual
representations: HS/CS and LO/CC (unsigned-higher-or-same/carry-set and
unsigned-lower/carry-clear respectively). Each pair shares a single condition
encoding, so this only changes the printed mnemonic, not the generated code.
The first name in each pair is the more descriptive in most circumstances, so
we should print it.
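
As a standalone illustration (not part of this commit; the function name is
hypothetical), the `csinc ..., lo` pattern the updated tests expect for
`icmp uge` is the expansion of `cset w0, hs`: an unsigned compare sets the
carry flag when the subtraction produces no borrow, so HS selects the same
encoding as CS, and LO the same as CC.

    // Hypothetical example, GNU as syntax: returns 1 if w0 >= w1
    // (unsigned), else 0 -- the pattern fast-isel-icmp.ll checks for
    // icmp uge.
            .text
            .globl  uge_example
    uge_example:
            cmp     w0, w1              // C == 1 iff no borrow, i.e. w0 >= w1
            csinc   w0, wzr, wzr, lo    // w0 = LO ? 0 : 1, i.e. w0 = HS
            ret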

llvm-svn: 207644
diff --git a/llvm/test/CodeGen/ARM64/fast-isel-icmp.ll b/llvm/test/CodeGen/ARM64/fast-isel-icmp.ll
index 22af542..68a76c9 100644
--- a/llvm/test/CodeGen/ARM64/fast-isel-icmp.ll
+++ b/llvm/test/CodeGen/ARM64/fast-isel-icmp.ll
@@ -54,7 +54,7 @@
 entry:
 ; CHECK: icmp_uge
 ; CHECK: cmp  w0, w1
-; CHECK: csinc w0, wzr, wzr, cc
+; CHECK: csinc w0, wzr, wzr, lo
   %cmp = icmp uge i32 %a, %b
   %conv = zext i1 %cmp to i32
   ret i32 %conv
@@ -64,7 +64,7 @@
 entry:
 ; CHECK: icmp_ult
 ; CHECK: cmp  w0, w1
-; CHECK: csinc w0, wzr, wzr, cs
+; CHECK: csinc w0, wzr, wzr, hs
   %cmp = icmp ult i32 %a, %b
   %conv = zext i1 %cmp to i32
   ret i32 %conv
@@ -158,7 +158,7 @@
 ; CHECK: uxth w0, w0
 ; CHECK: uxth w1, w1
 ; CHECK: cmp  w0, w1
-; CHECK: csinc w0, wzr, wzr, cs
+; CHECK: csinc w0, wzr, wzr, hs
   %cmp = icmp ult i16 %a, %b
   %conv2 = zext i1 %cmp to i32
   ret i32 %conv2
@@ -206,7 +206,7 @@
 ; CHECK: icmp_i1_unsigned_const
 ; CHECK: and w0, w0, #0x1
 ; CHECK: cmp  w0, #0
-; CHECK: csinc w0, wzr, wzr, cs
+; CHECK: csinc w0, wzr, wzr, hs
 ; CHECK: and w0, w0, #0x1
   %cmp = icmp ult i1 %a, 0
   %conv2 = zext i1 %cmp to i32
diff --git a/llvm/test/CodeGen/ARM64/xaluo.ll b/llvm/test/CodeGen/ARM64/xaluo.ll
index 6a8520d..bda41b10 100644
--- a/llvm/test/CodeGen/ARM64/xaluo.ll
+++ b/llvm/test/CodeGen/ARM64/xaluo.ll
@@ -31,7 +31,7 @@
 entry:
 ; CHECK-LABEL:  uaddo.i32
 ; CHECK:        adds w8, w0, w1
-; CHECK-NEXT:   csinc w0, wzr, wzr, cc
+; CHECK-NEXT:   csinc w0, wzr, wzr, lo
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -43,7 +43,7 @@
 entry:
 ; CHECK-LABEL:  uaddo.i64
 ; CHECK:        adds x8, x0, x1
-; CHECK-NEXT:   csinc w0, wzr, wzr, cc
+; CHECK-NEXT:   csinc w0, wzr, wzr, lo
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -79,7 +79,7 @@
 entry:
 ; CHECK-LABEL:  usubo.i32
 ; CHECK:        subs w8, w0, w1
-; CHECK-NEXT:   csinc w0, wzr, wzr, cs
+; CHECK-NEXT:   csinc w0, wzr, wzr, hs
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -91,7 +91,7 @@
 entry:
 ; CHECK-LABEL:  usubo.i64
 ; CHECK:        subs x8, x0, x1
-; CHECK-NEXT:   csinc w0, wzr, wzr, cs
+; CHECK-NEXT:   csinc w0, wzr, wzr, hs
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -184,7 +184,7 @@
 entry:
 ; CHECK-LABEL:  uaddo.select.i32
 ; CHECK:        cmn w0, w1
-; CHECK-NEXT:   csel w0, w0, w1, cs
+; CHECK-NEXT:   csel w0, w0, w1, hs
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -195,7 +195,7 @@
 entry:
 ; CHECK-LABEL:  uaddo.select.i64
 ; CHECK:        cmn x0, x1
-; CHECK-NEXT:   csel x0, x0, x1, cs
+; CHECK-NEXT:   csel x0, x0, x1, hs
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -228,7 +228,7 @@
 entry:
 ; CHECK-LABEL:  usubo.select.i32
 ; CHECK:        cmp w0, w1
-; CHECK-NEXT:   csel w0, w0, w1, cc
+; CHECK-NEXT:   csel w0, w0, w1, lo
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -239,7 +239,7 @@
 entry:
 ; CHECK-LABEL:  usubo.select.i64
 ; CHECK:        cmp x0, x1
-; CHECK-NEXT:   csel x0, x0, x1, cc
+; CHECK-NEXT:   csel x0, x0, x1, lo
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -338,7 +338,7 @@
 entry:
 ; CHECK-LABEL:  uaddo.br.i32
 ; CHECK:        cmn w0, w1
-; CHECK-NEXT:   b.cc
+; CHECK-NEXT:   b.lo
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -355,7 +355,7 @@
 entry:
 ; CHECK-LABEL:  uaddo.br.i64
 ; CHECK:        cmn x0, x1
-; CHECK-NEXT:   b.cc
+; CHECK-NEXT:   b.lo
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1
@@ -406,7 +406,7 @@
 entry:
 ; CHECK-LABEL:  usubo.br.i32
 ; CHECK:        cmp w0, w1
-; CHECK-NEXT:   b.cs
+; CHECK-NEXT:   b.hs
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
   %val = extractvalue {i32, i1} %t, 0
   %obit = extractvalue {i32, i1} %t, 1
@@ -423,7 +423,7 @@
 entry:
 ; CHECK-LABEL:  usubo.br.i64
 ; CHECK:        cmp x0, x1
-; CHECK-NEXT:   b.cs
+; CHECK-NEXT:   b.hs
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
   %val = extractvalue {i64, i1} %t, 0
   %obit = extractvalue {i64, i1} %t, 1