Fix typos.

Summary: This fixes a variety of typos in docs, code and headers.

Subscribers: jholewinski, sanjoy, arsenm, llvm-commits

Differential Revision: http://reviews.llvm.org/D12626

llvm-svn: 247495
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index 45a8820..005ceaa 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -286,9 +286,9 @@
void
AsmPrinter::emitDwarfAbbrevs(const std::vector<DIEAbbrev *>& Abbrevs) const {
- // For each abbrevation.
+ // For each abbreviation.
for (const DIEAbbrev *Abbrev : Abbrevs) {
- // Emit the abbrevations code (base 1 index.)
+ // Emit the abbreviations code (base 1 index.)
EmitULEB128(Abbrev->getNumber(), "Abbreviation Code");
// Emit the abbreviations data.
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 36819c8..f169f48 100644
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -897,11 +897,11 @@
if (!MI->getOperand(i).isFI())
continue;
- // Frame indicies in debug values are encoded in a target independent
+ // Frame indices in debug values are encoded in a target independent
// way with simply the frame index and offset rather than any
// target-specific addressing mode.
if (MI->isDebugValue()) {
- assert(i == 0 && "Frame indicies can only appear as the first "
+ assert(i == 0 && "Frame indices can only appear as the first "
"operand of a DBG_VALUE machine instruction");
unsigned Reg;
MachineOperand &Offset = MI->getOperand(1);
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index f63ce9b..1d2f4b2 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -83,7 +83,7 @@
assert(DstTy && DstTy->isFirstClassType() && "Invalid cast destination type");
assert(CastInst::isCast(opc) && "Invalid cast opcode");
- // The the types and opcodes for the two Cast constant expressions
+ // The types and opcodes for the two Cast constant expressions
Type *SrcTy = Op->getOperand(0)->getType();
Type *MidTy = Op->getType();
Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
@@ -1277,9 +1277,9 @@
}
/// IdxCompare - Compare the two constants as though they were getelementptr
-/// indices. This allows coersion of the types to be the same thing.
+/// indices. This allows coercion of the types to be the same thing.
///
-/// If the two constants are the "same" (after coersion), return 0. If the
+/// If the two constants are the "same" (after coercion), return 0. If the
/// first is less than the second, return -1, if the second is less than the
/// first, return 1. If the constants are not integral, return -2.
///
@@ -1999,7 +1999,7 @@
/// \brief Test whether a given ConstantInt is in-range for a SequentialType.
static bool isIndexInRangeOfSequentialType(SequentialType *STy,
const ConstantInt *CI) {
- // And indicies are valid when indexing along a pointer
+ // And indices are valid when indexing along a pointer
if (isa<PointerType>(STy))
return true;
diff --git a/llvm/lib/Target/AMDGPU/R600Instructions.td b/llvm/lib/Target/AMDGPU/R600Instructions.td
index 7beed09..33ef6a4 100644
--- a/llvm/lib/Target/AMDGPU/R600Instructions.td
+++ b/llvm/lib/Target/AMDGPU/R600Instructions.td
@@ -1655,7 +1655,7 @@
// ISel Patterns
//===----------------------------------------------------------------------===//
-// CND*_INT Pattterns for f32 True / False values
+// CND*_INT Patterns for f32 True / False values
class CND_INT_f32 <InstR600 cnd, CondCode cc> : Pat <
(selectcc i32:$src0, 0, f32:$src1, f32:$src2, cc),
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.td b/llvm/lib/Target/Mips/MipsInstrInfo.td
index 8914643..8844738 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -673,7 +673,7 @@
[(set RO:$rd, (OpNode RO:$rt, GPR32Opnd:$rs))], itin, FrmR,
opstr>;
-// Load Upper Imediate
+// Load Upper Immediate
class LoadUpper<string opstr, RegisterOperand RO, Operand Imm>:
InstSE<(outs RO:$rt), (ins Imm:$imm16), !strconcat(opstr, "\t$rt, $imm16"),
[], II_LUI, FrmI, opstr>, IsAsCheapAsAMove {
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp b/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
index 51782c6..c293058 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.cpp
@@ -357,8 +357,8 @@
}
// consider several special intrinsics in striping pointer casts, and
-// provide an option to ignore GEP indicies for find out the base address only
-// which could be used in simple alias disambigurate.
+// provide an option to ignore GEP indices for find out the base address only
+// which could be used in simple alias disambiguation.
const Value *
llvm::skipPointerTransfer(const Value *V, bool ignore_GEP_indices) {
V = V->stripPointerCasts();
@@ -379,9 +379,9 @@
}
// consider several special intrinsics in striping pointer casts, and
-// - ignore GEP indicies for find out the base address only, and
+// - ignore GEP indices for find out the base address only, and
// - tracking PHINode
-// which could be used in simple alias disambigurate.
+// which could be used in simple alias disambiguation.
const Value *
llvm::skipPointerTransfer(const Value *V, std::set<const Value *> &processed) {
if (processed.find(V) != processed.end())
@@ -428,7 +428,7 @@
return V;
}
-// The following are some useful utilities for debuggung
+// The following are some useful utilities for debugging
BasicBlock *llvm::getParentBlock(Value *v) {
if (BasicBlock *B = dyn_cast<BasicBlock>(v))
@@ -484,7 +484,7 @@
return nullptr;
}
-// Dump an instruction by nane
+// Dump an instruction by name
void llvm::dumpInst(Value *base, char *instName) {
Instruction *I = getInst(base, instName);
if (I)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index e3179db..3c70f44 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -511,7 +511,7 @@
if (!T->isAggregateType())
return nullptr;
- assert(LI.getAlignment() && "Alignement must be set at this point");
+ assert(LI.getAlignment() && "Alignment must be set at this point");
if (auto *ST = dyn_cast<StructType>(T)) {
// If the struct only have one element, we unpack.
@@ -681,7 +681,7 @@
// FIXME: If the GEP is not inbounds, and there are extra indices after the
// one we'll replace, those could cause the address computation to wrap
// (rendering the IsAllNonNegative() check below insufficient). We can do
- // better, ignoring zero indicies (and other indicies we can prove small
+ // better, ignoring zero indices (and other indices we can prove small
// enough not to wrap).
if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
return false;
@@ -857,7 +857,7 @@
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
-/// the store instruction sas otherwise there is no way to signal whether it was
+/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
// FIXME: We could probably with some care handle both volatile and atomic