Fix code generation of materialized conditions.

Move the logic that determines whether a condition needs to be
materialized into an optimization pass (so that the information does
not change as a side effect of another optimization).

Also clean up the ARM and x86_64 code generators:
- arm: raw ldr and str are for power users when a constant offset is
  in play; we should use the LoadFromOffset and StoreToOffset helpers
  instead.
- x86_64: fix misuses of movq instead of movl.

Change-Id: I01a03b91803624be2281a344a13ad5efbf4f3ef3
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 393eb1a..d37ef06 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -482,10 +482,13 @@
 
 void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
   HInstruction* cond = if_instr->InputAt(0);
-  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
-    // Moves do not affect the eflags register, so if the condition is evaluated
-    // just before the if, we don't need to evaluate it again.
-    if (!cond->IsCondition() || !cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr)) {
+  bool materialized = !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
+  // Moves do not affect the eflags register, so if the condition is evaluated
+  // just before the if, we don't need to evaluate it again.
+  bool eflags_set = cond->IsCondition()
+      && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
+  if (materialized) {
+    if (!eflags_set) {
       // Materialized condition, compare against 0.
       Location lhs = if_instr->GetLocations()->InAt(0);
       if (lhs.IsRegister()) {
@@ -493,8 +496,11 @@
       } else {
         __ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0));
       }
+      __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+    } else {
+      __ j(X86_64Condition(cond->AsCondition()->GetCondition()),
+           codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
     }
-    __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
   } else {
     Location lhs = cond->GetLocations()->InAt(0);
     Location rhs = cond->GetLocations()->InAt(1);
@@ -574,13 +580,13 @@
     // Clear register: setcc only sets the low byte.
     __ xorq(reg, reg);
     if (locations->InAt(1).IsRegister()) {
-      __ cmpq(locations->InAt(0).As<CpuRegister>(),
+      __ cmpl(locations->InAt(0).As<CpuRegister>(),
               locations->InAt(1).As<CpuRegister>());
     } else if (locations->InAt(1).IsConstant()) {
-      __ cmpq(locations->InAt(0).As<CpuRegister>(),
+      __ cmpl(locations->InAt(0).As<CpuRegister>(),
               Immediate(locations->InAt(1).GetConstant()->AsIntConstant()->GetValue()));
     } else {
-      __ cmpq(locations->InAt(0).As<CpuRegister>(),
+      __ cmpl(locations->InAt(0).As<CpuRegister>(),
               Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
     }
     __ setcc(X86_64Condition(comp->GetCondition()), reg);
@@ -879,10 +885,10 @@
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();
   // temp = object->GetClass();
   if (receiver.IsStackSlot()) {
-    __ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
-    __ movq(temp, Address(temp, class_offset));
+    __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+    __ movl(temp, Address(temp, class_offset));
   } else {
-    __ movq(temp, Address(receiver.As<CpuRegister>(), class_offset));
+    __ movl(temp, Address(receiver.As<CpuRegister>(), class_offset));
   }
   // temp = temp->GetMethodAt(method_offset);
   __ movl(temp, Address(temp, method_offset));