Support GenSelect for x86

kMirOpSelect is an extended MIR generated to remove trivial diamond
shapes where the conditional is an if-eqz or if-nez and each path
contains a single move or const bytecode with the same destination
register.

This patch enables x86 to generate code for this extended MIR.
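
For example, a diamond of the following shape (illustrative dex
bytecode; register numbers are arbitrary) is collapsed into a single
kMirOpSelect by an earlier optimization pass:

    if-eqz v0, :zero_case
    const/4 v1, #0x5        # false path
    goto :done
  :zero_case
    const/4 v1, #0x0        # true path
  :done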

A) Handling the constant specialization of kMirOpSelect:
  1) When the true case is zero and result_reg is not the same as src_reg:
      xor result_reg, result_reg
      cmp $0, src_reg
      mov t1, $false_case
      cmovnz result_reg, t1
  2) When the false case is zero and result_reg is not the same as src_reg:
      xor result_reg, result_reg
      cmp $0, src_reg
      mov t1, $true_case
      cmovz result_reg, t1
  3) All other cases (compare first; the moves that follow do not modify eflags):
      cmp $0, src_reg
      mov result_reg, $true_case
      mov t1, $false_case
      cmovnz result_reg, t1
B) Handling the move specialization of kMirOpSelect:
  1) When the true case is already in place:
      cmp $0, src_reg
      cmovnz result_reg, false_reg
  2) When the false case is already in place:
      cmp $0, src_reg
      cmovz result_reg, true_reg
  3) When neither case is in place:
      cmp $0, src_reg
      mov result_reg, true_reg
      cmovnz result_reg, false_reg
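
Illustrative sketch (not the ART quick-backend code): the hypothetical
Emit() helper just prints the instruction text, and the register names
and operand formatting are stand-ins; only the case dispatch mirrors
the sequences listed above.

  #include <cstdio>
  #include <string>

  namespace {

  // Hypothetical stand-in for the backend's instruction emitters.
  void Emit(const std::string& insn) { std::printf("  %s\n", insn.c_str()); }

  // Constant specialization: result = (src == 0) ? true_case : false_case.
  void GenSelectConst(const std::string& result_reg, const std::string& src_reg,
                      int true_case, int false_case) {
    const bool result_same_as_src = (result_reg == src_reg);
    if (true_case == 0 && !result_same_as_src) {
      // Case A.1: zero the result via xor, overwrite it on "not zero".
      Emit("xor " + result_reg + ", " + result_reg);
      Emit("cmp $0, " + src_reg);
      Emit("mov t1, $" + std::to_string(false_case));
      Emit("cmovnz " + result_reg + ", t1");
    } else if (false_case == 0 && !result_same_as_src) {
      // Case A.2: same idea, but the conditional move fires on "zero".
      Emit("xor " + result_reg + ", " + result_reg);
      Emit("cmp $0, " + src_reg);
      Emit("mov t1, $" + std::to_string(true_case));
      Emit("cmovz " + result_reg + ", t1");
    } else {
      // Case A.3: compare first; the immediate moves leave eflags intact.
      Emit("cmp $0, " + src_reg);
      Emit("mov " + result_reg + ", $" + std::to_string(true_case));
      Emit("mov t1, $" + std::to_string(false_case));
      Emit("cmovnz " + result_reg + ", t1");
    }
  }

  // Move specialization: result = (src == 0) ? true_reg : false_reg.
  void GenSelectMove(const std::string& result_reg, const std::string& src_reg,
                     const std::string& true_reg, const std::string& false_reg) {
    Emit("cmp $0, " + src_reg);
    if (result_reg == true_reg) {
      // Case B.1: the true value is already in result_reg.
      Emit("cmovnz " + result_reg + ", " + false_reg);
    } else if (result_reg == false_reg) {
      // Case B.2: the false value is already in result_reg.
      Emit("cmovz " + result_reg + ", " + true_reg);
    } else {
      // Case B.3: place the true value, then conditionally overwrite it.
      Emit("mov " + result_reg + ", " + true_reg);
      Emit("cmovnz " + result_reg + ", " + false_reg);
    }
  }

  }  // namespace

  int main() {
    GenSelectConst("eax", "ecx", 0, 7);         // Hits case A.1.
    GenSelectMove("eax", "ecx", "ebx", "eax");  // Hits case B.2.
    return 0;
  }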

Change-Id: Ic7c50823208fe82019916476a0a77c6a271679fe
Signed-off-by: Razvan A Lupusoru <razvan.a.lupusoru@intel.com>
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index a2c215c..0ef4034 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -141,7 +141,14 @@
     case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
     case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
     case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
-    case kOpMov: return LoadConstantNoClobber(r_dest_src1, value);
+    case kOpMov:
+      /*
+       * Moving the constant zero into a register can be specialized as an xor of that register.
+       * However, the xor sets eflags while the move does not. For that reason, always emit the
+       * move here; callers that do not care about eflags should call LoadConstantNoClobber instead.
+       */
+      opcode = kX86Mov32RI;
+      break;
     case kOpMul:
       opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
       return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);