ARM64: use 32-bit moves for constants where possible.

If we know that a particular 64-bit constant has all high bits zero, then we
can rely on the fact that 32-bit ARM64 instructions automatically zero out the
high bits of an x-register. This gives the expansion logic fewer constraints to
satisfy, so it can sometimes pick better sequences.

Came up while porting test/CodeGen/AArch64/movw-consts.ll: this will allow a
32-bit MOVN to be used in @test8 soon.

llvm-svn: 206379
diff --git a/llvm/test/CodeGen/ARM64/patchpoint.ll b/llvm/test/CodeGen/ARM64/patchpoint.ll
index 9e5ed6f..dd555b0 100644
--- a/llvm/test/CodeGen/ARM64/patchpoint.ll
+++ b/llvm/test/CodeGen/ARM64/patchpoint.ll
@@ -67,11 +67,11 @@
 entry:
 ; CHECK-LABEL: jscall_patchpoint_codegen2:
 ; CHECK:      Ltmp
-; CHECK:      orr x{{.+}}, xzr, #0x6
+; CHECK:      orr w{{.+}}, wzr, #0x6
 ; CHECK-NEXT: str x{{.+}}, [sp, #24]
 ; CHECK-NEXT: orr w{{.+}}, wzr, #0x4
 ; CHECK-NEXT: str w{{.+}}, [sp, #16]
-; CHECK-NEXT: orr x{{.+}}, xzr, #0x2
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x2
 ; CHECK-NEXT: str x{{.+}}, [sp]
 ; CHECK:      Ltmp
 ; CHECK-NEXT: movz  x16, #65535, lsl #32
@@ -88,15 +88,15 @@
 entry:
 ; CHECK-LABEL: jscall_patchpoint_codegen3:
 ; CHECK:      Ltmp
-; CHECK:      movz  x{{.+}}, #10
+; CHECK:      movz  w{{.+}}, #10
 ; CHECK-NEXT: str x{{.+}}, [sp, #48]
 ; CHECK-NEXT: orr w{{.+}}, wzr, #0x8
 ; CHECK-NEXT: str w{{.+}}, [sp, #36]
-; CHECK-NEXT: orr x{{.+}}, xzr, #0x6
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x6
 ; CHECK-NEXT: str x{{.+}}, [sp, #24]
 ; CHECK-NEXT: orr w{{.+}}, wzr, #0x4
 ; CHECK-NEXT: str w{{.+}}, [sp, #16]
-; CHECK-NEXT: orr x{{.+}}, xzr, #0x2
+; CHECK-NEXT: orr w{{.+}}, wzr, #0x2
 ; CHECK-NEXT: str x{{.+}}, [sp]
 ; CHECK:      Ltmp
 ; CHECK-NEXT: movz  x16, #65535, lsl #32