Skip the extra copy from an aggregate where it isn't necessary; rdar://problem/8139919. This shouldn't make much of a difference at -O3, but it should substantially reduce the number of memcpy calls generated at -O0.

Originally committed as r130717, but it was backed out due to an ObjC regression.
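
For illustration, a minimal sketch of the call pattern this affects (hypothetical names, mirroring the new test below): at -O0, passing a dereferenced struct pointer by value previously copied the aggregate into an intermediate temporary before the argument was set up.

    struct S { long a, b, c; };

    void callee(struct S s);

    void caller(struct S *p) {
      /* The aggregate lvalue *p is passed by value; with this change the
         argument is emitted directly from p's storage instead of first
         being copied into a separate temporary. */
      callee(*p);
    }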



git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@132102 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 4c9f3d4..712ae89 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1189,6 +1189,15 @@
     return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                     type);
 
+  if (hasAggregateLLVMType(type) && isa<ImplicitCastExpr>(E) &&
+      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
+    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
+    assert(L.isSimple());
+    args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
+             type, /*NeedsCopy*/true);
+    return;
+  }
+
   args.add(EmitAnyExprToTemp(E), type);
 }
 
@@ -1254,6 +1263,10 @@
                             Alignment, I->Ty);
         else
           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+      } else if (I->NeedsCopy && !ArgInfo.getIndirectByVal()) {
+        Args.push_back(CreateMemTemp(I->Ty));
+        EmitAggregateCopy(Args.back(), RV.getAggregateAddr(), I->Ty,
+                          RV.isVolatileQualified());
       } else {
         Args.push_back(RV.getAggregateAddr());
       }
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 3f600c0..160a62e 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -47,8 +47,9 @@
   struct CallArg {
     RValue RV;
     QualType Ty;
-    CallArg(RValue rv, QualType ty)
-    : RV(rv), Ty(ty)
+    bool NeedsCopy;
+    CallArg(RValue rv, QualType ty, bool needscopy)
+    : RV(rv), Ty(ty), NeedsCopy(needscopy)
     { }
   };
 
@@ -57,8 +58,8 @@
   class CallArgList :
     public llvm::SmallVector<CallArg, 16> {
   public:
-    void add(RValue rvalue, QualType type) {
-      push_back(CallArg(rvalue, type));
+    void add(RValue rvalue, QualType type, bool needscopy = false) {
+      push_back(CallArg(rvalue, type, needscopy));
     }
   };
 
diff --git a/test/CodeGen/byval-memcpy-elim.c b/test/CodeGen/byval-memcpy-elim.c
new file mode 100644
index 0000000..8aa08fb
--- /dev/null
+++ b/test/CodeGen/byval-memcpy-elim.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-apple-darwin10 < %s | FileCheck %s
+
+struct Test1S {
+ long NumDecls;
+ long X;
+ long Y;
+};
+struct Test2S {
+ long NumDecls;
+ long X;
+};
+
+// Make sure we don't generate an extra memcpy for aggregate lvalue arguments.
+void test1a(struct Test1S, struct Test2S);
+// CHECK: define void @test1(
+// CHECK-NOT: memcpy
+// CHECK: call void @test1a
+void test1(struct Test1S *A, struct Test2S *B) {
+  test1a(*A, *B);
+}