1. Merge fast-isel-shift-imm.ll into fast-isel-x86-64.ll.
2. Implement rdar://9289501 - fast isel should fold trivial multiplies to
   shifts (see the example after this list).
3. Teach tblgen to handle shift immediates that are a different size than the
   shifted operands, eliminating some code from the X86 fast isel backend.
4. Have FastISel::SelectBinaryOp use (the poorly named) FastEmit_ri_ function
   instead of FastEmit_ri to simplify the code.
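
As an illustrative sketch for item 2 (the function name below and the exact
instruction chosen are assumptions, not taken from this patch), fast isel can
now select a multiply by a power-of-two constant such as this one as a left
shift instead of an integer multiply:

   define i64 @mul8(i64 %x) nounwind {
     ; 8 is 2^3, so this multiply can be folded to a shift-left by 3
     %y = mul i64 %x, 8
     ret i64 %y
   }

At -O0, where fast isel performs the selection, a case like this should now
lower to a shift on x86-64 rather than an integer multiply.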



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129666 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/lock-inst-encoding.ll b/test/CodeGen/X86/lock-inst-encoding.ll
index 03468e2..159aeee 100644
--- a/test/CodeGen/X86/lock-inst-encoding.ll
+++ b/test/CodeGen/X86/lock-inst-encoding.ll
@@ -4,10 +4,10 @@
 target triple = "x86_64-apple-darwin10.0.0"
 
 ; CHECK: f0:
-; CHECK: addq %rax, (%rdi)
-; CHECK: # encoding: [0xf0,0x48,0x01,0x07]
+; CHECK: addq %rcx, (%rdi)
+; CHECK: # encoding: [0xf0,0x48,0x01,0x0f]
 ; CHECK: ret
-define void @f0(i64* %a0) {
+define void @f0(i64* %a0) nounwind {
   %t0 = and i64 1, 1
   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true) nounwind
   %1 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %a0, i64 %t0) nounwind