ARM64: Remove the duplicated condition codes.
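
The arm64 backend defined its own Condition enum, duplicating
vixl::Condition value for value (EQ..NV map to eq..nv), and translated
between the two with the COND_OP() cast macro at every use. Delete the
enum and the macro, take vixl::Condition in the assembler interface
directly, and change the default argument from AL to vixl::al. A
"using namespace vixl;" in assembler_arm64.cc keeps the call sites
there terse (al, nv, eq, ne), while the header spells out vixl::al.

For callers this is a mechanical rename; a minimal sketch follows
(hypothetical call sites, not part of this patch):

  assembler.AddConstant(X0, SP, 16, vixl::ne);  // X0 = ne ? SP + 16 : X0
  assembler.LoadImmediate(X1, 42);              // cond defaults to vixl::al
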
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 3f90f21..3edf59b 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -21,6 +21,8 @@
 #include "thread.h"
 #include "utils.h"
 
+using namespace vixl;  // NOLINT(build/namespaces)
+
 namespace art {
 namespace arm64 {
 
@@ -75,7 +77,7 @@
 
 void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
                                  Condition cond) {
-  if ((cond == AL) || (cond == NV)) {
+  if ((cond == al) || (cond == nv)) {
     // VIXL macro-assembler handles all variants.
     ___ Add(reg_x(rd), reg_x(rn), value);
   } else {
@@ -85,7 +87,7 @@
     temps.Exclude(reg_x(rd), reg_x(rn));
     vixl::Register temp = temps.AcquireX();
     ___ Add(temp, reg_x(rn), value);
-    ___ Csel(reg_x(rd), temp, reg_x(rd), COND_OP(cond));
+    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
   }
 }
 
@@ -195,7 +197,7 @@
 // Load routines.
 void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
                                    Condition cond) {
-  if ((cond == AL) || (cond == NV)) {
+  if ((cond == al) || (cond == nv)) {
     ___ Mov(reg_x(dest), value);
   } else {
     // temp = value
@@ -205,9 +207,9 @@
       temps.Exclude(reg_x(dest));
       vixl::Register temp = temps.AcquireX();
       ___ Mov(temp, value);
-      ___ Csel(reg_x(dest), temp, reg_x(dest), COND_OP(cond));
+      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
     } else {
-      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
+      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
     }
   }
 }
@@ -557,11 +559,11 @@
     }
     ___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
     if (!out_reg.Equals(in_reg)) {
-      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+      LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
     }
-    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
+    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), ne);
   } else {
-    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
+    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), al);
   }
 }
 
@@ -577,9 +579,9 @@
     // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
     ___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
     // Move this logic in add constants with flags.
-    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), ne);
   } else {
-    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), al);
   }
   StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
 }
@@ -593,7 +595,7 @@
   vixl::Label exit;
   if (!out_reg.Equals(in_reg)) {
     // FIXME: Who sets the flags here?
-    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+    LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
   }
   ___ Cbz(reg_x(in_reg.AsCoreRegister()), &exit);
   LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index ab4999a..788950b 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -34,28 +34,6 @@
 namespace arm64 {
 
 #define MEM_OP(x...)      vixl::MemOperand(x)
-#define COND_OP(x)        static_cast<vixl::Condition>(x)
-
-enum Condition {
-  kNoCondition = -1,
-  EQ = 0,
-  NE = 1,
-  HS = 2,
-  LO = 3,
-  MI = 4,
-  PL = 5,
-  VS = 6,
-  VC = 7,
-  HI = 8,
-  LS = 9,
-  GE = 10,
-  LT = 11,
-  GT = 12,
-  LE = 13,
-  AL = 14,    // Always.
-  NV = 15,    // Behaves as always/al.
-  kMaxCondition = 16,
-};
 
 enum LoadOperandType {
   kLoadSignedByte,
@@ -225,15 +203,15 @@
   void StoreSToOffset(SRegister source, Register base, int32_t offset);
   void StoreDToOffset(DRegister source, Register base, int32_t offset);
 
-  void LoadImmediate(Register dest, int32_t value, Condition cond = AL);
+  void LoadImmediate(Register dest, int32_t value, vixl::Condition cond = vixl::al);
   void Load(Arm64ManagedRegister dst, Register src, int32_t src_offset, size_t size);
   void LoadWFromOffset(LoadOperandType type, WRegister dest,
                       Register base, int32_t offset);
   void LoadFromOffset(Register dest, Register base, int32_t offset);
   void LoadSFromOffset(SRegister dest, Register base, int32_t offset);
   void LoadDFromOffset(DRegister dest, Register base, int32_t offset);
-  void AddConstant(Register rd, int32_t value, Condition cond = AL);
-  void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL);
+  void AddConstant(Register rd, int32_t value, vixl::Condition cond = vixl::al);
+  void AddConstant(Register rd, Register rn, int32_t value, vixl::Condition cond = vixl::al);
 
   // Vixl buffer.
   byte* vixl_buf_;