Fix typos in comments
llvm-svn: 215777
diff --git a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp b/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
index 0c37c45..5c40069 100644
--- a/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandLoadLinkedPass.cpp
@@ -105,7 +105,7 @@
? Monotonic
: LI->getOrdering();
- // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
+ // The only 64-bit load guaranteed to be single-copy atomic by the ARM is
// an ldrexd (A3.5.3).
IRBuilder<> Builder(LI);
Value *Val = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
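Background for the comment fixed above (an illustrative aside, not part of the patch): on 32-bit ARM an ordinary 64-bit load such as ldrd is not, in general, guaranteed to be single-copy atomic, so this pass expands a 64-bit atomic load through the target's emitLoadLinked hook, which on ARM produces an ldrexd. A minimal C++ sketch of the source-level case this covers; the names counter and read_counter are invented for the example:

    #include <atomic>
    #include <cstdint>

    // A 64-bit atomic object on a 32-bit ARM target.
    std::atomic<uint64_t> counter{0};

    // Even a relaxed 64-bit atomic load cannot use a plain ldrd, since ldrd is
    // not guaranteed single-copy atomic; on ARMv7 this load is typically
    // lowered through ldrexd, the instruction emitLoadLinked emits for i64.
    uint64_t read_counter() {
      return counter.load(std::memory_order_relaxed);
    }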
diff --git a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
index ab2c4b7..287989f 100644
--- a/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -19,7 +19,7 @@
// a = add nsw i64 f, 3
// e = getelementptr ..., i64 a
//
-// This is legal to do so if the computations are markers with either nsw or nuw
+// This is legal to do if the computations are marked with either nsw or nuw
// markers.
// Moreover, the current heuristic is simple: it does not create new sext
// operations, i.e., it gives up when a sext would have forked (e.g., if
@@ -223,7 +223,7 @@
}
// Input:
-// - SExtInsts contains all the sext instructions that are use direclty in
+// - SExtInsts contains all the sext instructions that are used directly in
// GetElementPtrInst, i.e., access to memory.
// Algorithm:
// - For each sext operation in SExtInsts:
@@ -353,7 +353,7 @@
// If the use is already of the right type, connect its uses to its argument
// and delete it.
- // This can happen for an Instruction which all uses are sign extended.
+ // This can happen for an Instruction all uses of which are sign extended.
if (!ToRemove.count(SExt) &&
SExt->getType() == SExt->getOperand(0)->getType()) {
DEBUG(dbgs() << "Sign extension is useless, attach its use to "
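The header comment fixed in this file rests on the observation that sign extension distributes over an addition known not to wrap, which is exactly what the nsw flag in the IR example asserts. A small self-contained C++ check of that identity, using arbitrary illustrative values rather than anything from the source:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Mirrors the IR in the comment: f plays the role of the loaded i32,
      // 3 is the constant offset fed to the getelementptr.
      int32_t f = -1000;
      int32_t c = 3;

      // sext(add nsw f, c): sign-extend after the non-wrapping 32-bit add.
      int64_t sext_of_add = static_cast<int64_t>(f + c);

      // add(sext f, sext c): sign-extend first, then add in 64 bits.
      int64_t add_of_sext = static_cast<int64_t>(f) + static_cast<int64_t>(c);

      // Because the 32-bit add does not wrap, both orders agree, which is why
      // the pass may legally hoist the sext past the add.
      assert(sext_of_add == add_of_sext);
      return 0;
    }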
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index fa38332..0bb304a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -743,7 +743,7 @@
// On v8, we have particularly efficient implementations of atomic fences
// if they can be combined with nearby atomic loads and stores.
if (!Subtarget->hasV8Ops()) {
- // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
+ // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
setInsertFencesForAtomic(true);
}
} else {
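For the hunk above (again an illustration, not part of the change): before ARMv8 there are no load-acquire/store-release instructions, so setInsertFencesForAtomic(true) makes the backend bracket atomic operations with dmb ish barriers. A hedged C++ sketch of the kind of operation affected; the name swap_flag is invented for the example:

    #include <atomic>

    std::atomic<int> flag{0};

    // On ARMv7 a sequentially consistent exchange is typically lowered to
    // dmb ish, an ldrex/strex retry loop, then another dmb ish; those are the
    // fences that setInsertFencesForAtomic requests here.
    int swap_flag(int v) {
      return flag.exchange(v, std::memory_order_seq_cst);
    }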
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 065d5d6..1263e8b 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -439,7 +439,7 @@
// enableAtomicExpandLoadLinked - True if we need to expand our atomics.
bool enableAtomicExpandLoadLinked() const override;
- /// getInstrItins - Return the instruction itineraies based on subtarget
+ /// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 26ff23b..e7cbf1b 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -56,7 +56,7 @@
HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
const TargetMachine &TM);
- /// getInstrItins - Return the instruction itineraies based on subtarget
+ /// getInstrItins - Return the instruction itineraries based on subtarget
/// selection.
const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;