This applies the same change as the previous patch, but on the store side.  Before, we compiled this:

struct DeclGroup {
  unsigned NumDecls;
};

int foo(DeclGroup D) {
  return D.NumDecls;
}

to:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4          ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64                               ; <i64*> [#uses=2]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to %struct.DeclGroup*    ; <%struct.DeclGroup*> [#uses=1]
  %2 = load %struct.DeclGroup* %1, align 1        ; <%struct.DeclGroup> [#uses=1]
  store %struct.DeclGroup %2, %struct.DeclGroup* %D
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1                         ; <i32> [#uses=1]
  ret i32 %tmp2
}

which caused fast isel bailouts due to the first-class aggregate (FCA) load/store of %2.  Now
we generate this blissfully simple code:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4          ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64                               ; <i64*> [#uses=2]
  %coerce.dive = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to i32*                  ; <i32*> [#uses=1]
  %2 = load i32* %1, align 1                      ; <i32> [#uses=1]
  store i32 %2, i32* %coerce.dive
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1                         ; <i32> [#uses=1]
  ret i32 %tmp2
}

This avoids the fast isel bailout and is groundwork for a future patch.
It reduces bailouts on CGStmt.ll from 935 to 911.
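
For reference, the shared struct-diving helper appears only in fragments in the
diff below, so here is a rough sketch of its whole shape.  This is reconstructed
from the doc comment and the surrounding hunks; the exact size check and the
CreateConstGEP2_32 call are assumptions, not the verbatim implementation:

static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  // Per the doc comment: only enter the first element if it is at least
  // DstSize bytes, otherwise diving would drop data.  (Assumed check.)
  const llvm::Type *FirstElt = SrcSTy->getElementType(0);
  uint64_t FirstEltSize = CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize)
    return SrcPtr;

  // GEP into the first element; this is what produces %coerce.dive above.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is itself a struct, keep diving.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

With this patch, CreateCoercedStore calls the helper on the destination pointer
before storing, which is what turns the FCA store into the scalar i32 store
through %coerce.dive in the IR above.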



git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106974 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 7ec5e30..7835505 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -345,14 +345,14 @@
   }
 }
 
-/// EnterStructPointerForCoercedLoad - Given a pointer to a struct where we are
+/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
 /// accessing some number of bytes out of it, try to gep into the struct to get
 /// at its inner goodness.  Dive as deep as possible without entering an element
 /// with an in-memory size smaller than DstSize.
 static llvm::Value *
-EnterStructPointerForCoercedLoad(llvm::Value *SrcPtr,
-                                 const llvm::StructType *SrcSTy,
-                                 uint64_t DstSize, CodeGenFunction &CGF) {
+EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+                                   const llvm::StructType *SrcSTy,
+                                   uint64_t DstSize, CodeGenFunction &CGF) {
   // We can't dive into a zero-element struct.
   if (SrcSTy->getNumElements() == 0) return SrcPtr;
   
@@ -373,7 +373,7 @@
   const llvm::Type *SrcTy =
     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
   if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
-    return EnterStructPointerForCoercedLoad(SrcPtr, SrcSTy, DstSize, CGF);
+    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
 
   return SrcPtr;
 }
@@ -394,7 +394,7 @@
   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
   
   if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
-    SrcPtr = EnterStructPointerForCoercedLoad(SrcPtr, SrcSTy, DstSize, CGF);
+    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
     SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
   }
   
@@ -438,10 +438,16 @@
                                bool DstIsVolatile,
                                CodeGenFunction &CGF) {
   const llvm::Type *SrcTy = Src->getType();
+  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+  
   const llvm::Type *DstTy =
     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
 
-  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
+    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+  }
+  
   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
 
   // If store is legal, just bitcast the src pointer.