Rename PaddedSize to AllocSize, in the hope that this will make it
more obvious what it represents and keep it from being confused with
the StoreSize.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@71349 91177308-0d34-0410-b5e6-96231b3b80d8
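
For context on the names involved: TargetData reports three sizes for a
type -- getTypeSizeInBits (the bits of data in the value), getTypeStoreSize
(that, rounded up to whole bytes), and the newly renamed getTypeAllocSize
(roughly the store size rounded up to the type's ABI alignment: the spacing
between consecutive array elements and the space an alloca reserves). The
standalone C++ sketch below is only illustrative; it does not use the LLVM
API, and the x86_fp80 numbers (80 bits of data, 4-byte ABI alignment as on
i386) are assumptions chosen for the example. It shows why the X86_FP80
hunk in AsmPrinter below zero-fills getTypeAllocSize minus getTypeStoreSize
bytes of tail padding.

  #include <cstdint>
  #include <iostream>

  // Illustrative stand-ins for the sizes TargetData reports for a type.
  // Assumed example values: an i386-style x86_fp80 with 80 bits of data
  // and 4-byte ABI alignment.
  struct TypeSizes {
    uint64_t SizeInBits; // bits of data in the value (e.g. 80)
    uint64_t ABIAlign;   // ABI alignment in bytes (e.g. 4)

    // StoreSize: bytes needed to hold the value, i.e. the bit size
    // rounded up to a whole number of bytes.
    uint64_t StoreSize() const { return (SizeInBits + 7) / 8; }

    // AllocSize (formerly "PaddedSize"): StoreSize rounded up to the ABI
    // alignment, giving the distance between consecutive array elements.
    uint64_t AllocSize() const {
      uint64_t Store = StoreSize();
      return (Store + ABIAlign - 1) / ABIAlign * ABIAlign;
    }
  };

  int main() {
    TypeSizes X86FP80 = {80, 4}; // assumed i386 layout for x86_fp80
    std::cout << "store size: " << X86FP80.StoreSize() << " bytes\n"  // 10
              << "alloc size: " << X86FP80.AllocSize() << " bytes\n"  // 12
              << "tail padding: "
              << X86FP80.AllocSize() - X86FP80.StoreSize() << " bytes\n"; // 2
    return 0;
  }

With those assumed numbers the sketch prints a 10-byte store size, a
12-byte alloc size, and 2 bytes of tail padding -- the quantity the
AsmPrinter change emits as zeros after the stored 80-bit value.
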
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 0d9d777..45462da 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -313,7 +313,7 @@
       EmitZeros(NewOffset - Offset);
 
       const Type *Ty = CPE.getType();
-      Offset = NewOffset + TM.getTargetData()->getTypePaddedSize(Ty);
+      Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
 
       O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << '_'
         << CPI << ":\t\t\t\t\t";
@@ -889,12 +889,12 @@
 
       // We can emit the pointer value into this slot if the slot is an
       // integer slot greater or equal to the size of the pointer.
-      if (TD->getTypePaddedSize(Ty) >= TD->getTypePaddedSize(Op->getType()))
+      if (TD->getTypeAllocSize(Ty) >= TD->getTypeAllocSize(Op->getType()))
         return EmitConstantValueOnly(Op);
 
       O << "((";
       EmitConstantValueOnly(Op);
-      APInt ptrMask = APInt::getAllOnesValue(TD->getTypePaddedSizeInBits(Ty));
+      APInt ptrMask = APInt::getAllOnesValue(TD->getTypeAllocSizeInBits(Ty));
       
       SmallString<40> S;
       ptrMask.toStringUnsigned(S);
@@ -992,14 +992,14 @@
                                           unsigned AddrSpace) {
   // Print the fields in successive locations. Pad to align if needed!
   const TargetData *TD = TM.getTargetData();
-  unsigned Size = TD->getTypePaddedSize(CVS->getType());
+  unsigned Size = TD->getTypeAllocSize(CVS->getType());
   const StructLayout *cvsLayout = TD->getStructLayout(CVS->getType());
   uint64_t sizeSoFar = 0;
   for (unsigned i = 0, e = CVS->getNumOperands(); i != e; ++i) {
     const Constant* field = CVS->getOperand(i);
 
     // Check if padding is needed and insert one or more 0s.
-    uint64_t fieldSize = TD->getTypePaddedSize(field->getType());
+    uint64_t fieldSize = TD->getTypeAllocSize(field->getType());
     uint64_t padSize = ((i == e-1 ? Size : cvsLayout->getElementOffset(i+1))
                         - cvsLayout->getElementOffset(i)) - fieldSize;
     sizeSoFar += fieldSize + padSize;
@@ -1123,7 +1123,7 @@
           << " long double most significant halfword";
       O << '\n';
     }
-    EmitZeros(TD->getTypePaddedSize(Type::X86_FP80Ty) -
+    EmitZeros(TD->getTypeAllocSize(Type::X86_FP80Ty) -
               TD->getTypeStoreSize(Type::X86_FP80Ty), AddrSpace);
     return;
   } else if (CFP->getType() == Type::PPC_FP128Ty) {
@@ -1228,7 +1228,7 @@
 void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
   const TargetData *TD = TM.getTargetData();
   const Type *type = CV->getType();
-  unsigned Size = TD->getTypePaddedSize(type);
+  unsigned Size = TD->getTypeAllocSize(type);
 
   if (CV->isNullValue() || isa<UndefValue>(CV)) {
     EmitZeros(Size, AddrSpace);
diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp
index ddc4e35..7cc1162 100644
--- a/lib/CodeGen/ELFWriter.cpp
+++ b/lib/CodeGen/ELFWriter.cpp
@@ -284,7 +284,7 @@
 
   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
   unsigned Size  =
-    TM.getTargetData()->getTypePaddedSize(GV->getType()->getElementType());
+    TM.getTargetData()->getTypeAllocSize(GV->getType()->getElementType());
 
   // If this global has a zero initializer, it is part of the .bss or common
   // section.
diff --git a/lib/CodeGen/MachOWriter.cpp b/lib/CodeGen/MachOWriter.cpp
index d2e8791..c878798 100644
--- a/lib/CodeGen/MachOWriter.cpp
+++ b/lib/CodeGen/MachOWriter.cpp
@@ -281,7 +281,7 @@
   // "giant object for PIC" optimization.
   for (unsigned i = 0, e = CP.size(); i != e; ++i) {
     const Type *Ty = CP[i].getType();
-    unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+    unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
 
     MachOWriter::MachOSection *Sec = MOW.getConstSection(CP[i].Val.ConstVal);
     OutputBuffer SecDataOut(Sec->SectionData, is64Bit, isLittleEndian);
@@ -355,7 +355,7 @@
 
 void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
 
   // Reserve space in the .bss section for this symbol while maintaining the
@@ -400,7 +400,7 @@
 
 void MachOWriter::EmitGlobal(GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
   bool NoInit = !GV->hasInitializer();
   
   // If this global has a zero initializer, it is part of the .bss or common
@@ -825,7 +825,7 @@
       continue;
     } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(PC)) {
       unsigned ElementSize =
-        TD->getTypePaddedSize(CP->getType()->getElementType());
+        TD->getTypeAllocSize(CP->getType()->getElementType());
       for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CP->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(PC)) {
@@ -926,10 +926,10 @@
         abort();
       }
     } else if (isa<ConstantAggregateZero>(PC)) {
-      memset((void*)PA, 0, (size_t)TD->getTypePaddedSize(PC->getType()));
+      memset((void*)PA, 0, (size_t)TD->getTypeAllocSize(PC->getType()));
     } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(PC)) {
       unsigned ElementSize =
-        TD->getTypePaddedSize(CPA->getType()->getElementType());
+        TD->getTypeAllocSize(CPA->getType()->getElementType());
       for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CPA->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(PC)) {
diff --git a/lib/CodeGen/MachOWriter.h b/lib/CodeGen/MachOWriter.h
index aacaf2c..20a4084 100644
--- a/lib/CodeGen/MachOWriter.h
+++ b/lib/CodeGen/MachOWriter.h
@@ -468,7 +468,7 @@
       
       const Type *Ty = C->getType();
       if (Ty->isPrimitiveType() || Ty->isInteger()) {
-        unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+        unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
         switch(Size) {
         default: break; // Fall through to __TEXT,__const
         case 4:
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 10b7576..8a1dc5d 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5694,7 +5694,7 @@
         // Get the offsets to the 0 and 1 element of the array so that we can
         // select between them.
         SDValue Zero = DAG.getIntPtrConstant(0);
-        unsigned EltSize = (unsigned)TD.getTypePaddedSize(Elts[0]->getType());
+        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
         SDValue One = DAG.getIntPtrConstant(EltSize);
         
         SDValue Cond = DAG.getSetCC(DL,
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 2205186..367cf4c 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -285,7 +285,7 @@
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
         if (CI->getZExtValue() == 0) continue;
         uint64_t Offs = 
-          TD.getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
         N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
         if (N == 0)
           // Unhandled operand. Halt "fast" selection and bail.
@@ -294,7 +294,7 @@
       }
       
       // N = N + Idx * ElementSize;
-      uint64_t ElementSize = TD.getTypePaddedSize(Ty);
+      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
       unsigned IdxN = getRegForGEPIndex(Idx);
       if (IdxN == 0)
         // Unhandled operand. Halt "fast" selection and bail.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 0787f93..1a3370f 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3638,7 +3638,7 @@
       // Increment the pointer, VAList, to the next vaarg
       Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                          DAG.getConstant(TLI.getTargetData()->
-                                         getTypePaddedSize(VT.getTypeForMVT()),
+                                         getTypeAllocSize(VT.getTypeForMVT()),
                                          TLI.getPointerTy()));
       // Store the incremented VAList to the legalized pointer
       Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0);
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
index 6e38590..f1da258 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
@@ -295,7 +295,7 @@
       Align = TM.getTargetData()->getPrefTypeAlignment(Type);
       if (Align == 0) {
         // Alignment of vector types.  FIXME!
-        Align = TM.getTargetData()->getTypePaddedSize(Type);
+        Align = TM.getTargetData()->getTypeAllocSize(Type);
       }
     }
     
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
index 1562c13..b340d0c 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
@@ -128,7 +128,7 @@
   // Given an array type, recursively traverse the elements.
   if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     const Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
+    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -294,7 +294,7 @@
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
       if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
         const Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
         unsigned Align =
           std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                    AI->getAlignment());
@@ -2700,7 +2700,7 @@
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
         if (CI->getZExtValue() == 0) continue;
         uint64_t Offs =
-            TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+            TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
         SDValue OffsVal;
         unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
         if (PtrBits < 64) {
@@ -2715,7 +2715,7 @@
       }
 
       // N = N + Idx * ElementSize;
-      uint64_t ElementSize = TD->getTypePaddedSize(Ty);
+      uint64_t ElementSize = TD->getTypeAllocSize(Ty);
       SDValue IdxN = getValue(Idx);
 
       // If the index is smaller or larger than intptr_t, truncate or extend
@@ -2756,7 +2756,7 @@
     return;   // getValue will auto-populate this.
 
   const Type *Ty = I.getAllocatedType();
-  uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
   unsigned Align =
     std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
              I.getAlignment());
@@ -5199,7 +5199,7 @@
         // Otherwise, create a stack slot and emit a store to it before the
         // asm.
         const Type *Ty = OpVal->getType();
-        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
         unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
         MachineFunction &MF = DAG.getMachineFunction();
         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
@@ -5500,7 +5500,7 @@
   // i32-ness of the optimizer: we do not want to promote to i64 and then
   // multiply on 64-bit targets.
   // FIXME: Malloc inst should go away: PR715.
-  uint64_t ElementSize = TD->getTypePaddedSize(I.getType()->getElementType());
+  uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
   if (ElementSize != 1)
     Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
                       Src, DAG.getConstant(ElementSize, Src.getValueType()));
@@ -5614,7 +5614,7 @@
         const PointerType *Ty = cast<PointerType>(I->getType());
         const Type *ElementTy = Ty->getElementType();
         unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-        unsigned FrameSize  = getTargetData()->getTypePaddedSize(ElementTy);
+        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
         // For ByVal, alignment should be passed from FE.  BE will guess if
         // this info is not there but there are cases it cannot get right.
         if (F.getParamAlignment(j))
@@ -5747,7 +5747,7 @@
         const PointerType *Ty = cast<PointerType>(Args[i].Ty);
         const Type *ElementTy = Ty->getElementType();
         unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-        unsigned FrameSize  = getTargetData()->getTypePaddedSize(ElementTy);
+        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
         // For ByVal, alignment should come from FE.  BE will guess if this
         // info is not there but there are cases it cannot get right.
         if (Args[i].Alignment)
diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp
index c333acd..c179f1e 100644
--- a/lib/CodeGen/StackProtector.cpp
+++ b/lib/CodeGen/StackProtector.cpp
@@ -114,7 +114,7 @@
         if (const ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType()))
           // If an array has more than SSPBufferSize bytes of allocated space,
           // then we emit stack protectors.
-          if (SSPBufferSize <= TD->getTypePaddedSize(AT))
+          if (SSPBufferSize <= TD->getTypeAllocSize(AT))
             return true;
       }
   }