From d3c712e50b3e7f06e8027b50e922956fbd00cb70 Mon Sep 17 00:00:00 2001 From: Benjamin Kramer Date: Sat, 8 Aug 2015 18:27:36 +0000 Subject: [PATCH] Fix some comment typos. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@244402 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Analysis/TargetLibraryInfo.h | 2 +- include/llvm/CodeGen/Analysis.h | 2 +- include/llvm/CodeGen/RegAllocPBQP.h | 2 +- include/llvm/IR/DebugInfoMetadata.h | 4 +- include/llvm/ProfileData/CoverageMapping.h | 4 +- include/llvm/Support/Options.h | 4 +- include/llvm/Support/ScaledNumber.h | 4 +- include/llvm/Support/raw_ostream.h | 4 +- include/llvm/Target/TargetOpcodes.h | 2 +- include/llvm/Transforms/Utils/LoopUtils.h | 4 +- lib/Analysis/CFLAliasAnalysis.cpp | 4 +- lib/Analysis/ScalarEvolutionExpander.cpp | 4 +- lib/Analysis/ValueTracking.cpp | 6 +-- .../AsmPrinter/WinCodeViewLineTables.cpp | 2 +- lib/CodeGen/CodeGenPrepare.cpp | 4 +- lib/CodeGen/GlobalMerge.cpp | 2 +- lib/CodeGen/PeepholeOptimizer.cpp | 4 +- lib/CodeGen/RegisterCoalescer.cpp | 6 +-- lib/CodeGen/RegisterPressure.cpp | 2 +- lib/CodeGen/ScheduleDAGInstrs.cpp | 8 ++-- .../SelectionDAG/ResourcePriorityQueue.cpp | 6 +-- lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2 +- .../SelectionDAG/SelectionDAGBuilder.cpp | 8 ++-- .../SelectionDAG/StatepointLowering.cpp | 25 ++++++------ lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 2 +- lib/CodeGen/WinEHPrepare.cpp | 2 +- lib/DebugInfo/DWARF/DWARFContext.cpp | 4 +- .../RuntimeDyld/RuntimeDyldELF.cpp | 4 +- lib/IR/AsmWriter.cpp | 2 +- lib/MC/MCELFStreamer.cpp | 4 +- lib/Target/AArch64/AArch64A53Fix835769.cpp | 4 +- lib/Target/AArch64/AArch64FastISel.cpp | 2 +- lib/Target/AArch64/AArch64FrameLowering.cpp | 4 +- lib/Target/AArch64/AArch64ISelLowering.cpp | 4 +- lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp | 4 +- lib/Target/AMDGPU/SIISelLowering.cpp | 10 ++--- lib/Target/AMDGPU/SIInsertWaits.cpp | 2 +- lib/Target/AMDGPU/SIInstrInfo.cpp | 2 +- lib/Target/ARM/ARMConstantIslandPass.cpp | 2 +- lib/Target/ARM/ARMISelLowering.cpp | 4 +- lib/Target/Hexagon/HexagonGenPredicate.cpp | 2 +- lib/Target/Hexagon/HexagonHardwareLoops.cpp | 6 +-- .../MSP430/MSP430MachineFunctionInfo.cpp | 2 +- lib/Target/Mips/MipsISelLowering.cpp | 2 +- .../NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp | 2 +- lib/Target/NVPTX/NVPTXLowerAlloca.cpp | 2 +- lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp | 2 +- lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 2 +- lib/Target/X86/X86ISelLowering.cpp | 12 +++--- .../InstCombine/InstructionCombining.cpp | 2 +- lib/Transforms/ObjCARC/ObjCARCOpts.cpp | 4 +- lib/Transforms/ObjCARC/PtrState.h | 6 +-- lib/Transforms/Scalar/GVN.cpp | 4 +- lib/Transforms/Scalar/LICM.cpp | 4 +- lib/Transforms/Scalar/LoopInterchange.cpp | 22 +++++------ lib/Transforms/Scalar/LoopUnrollPass.cpp | 4 +- lib/Transforms/Scalar/PlaceSafepoints.cpp | 14 +++---- .../Scalar/RewriteStatepointsForGC.cpp | 38 +++++++++---------- lib/Transforms/Scalar/SROA.cpp | 17 +++++---- lib/Transforms/Utils/CloneFunction.cpp | 2 +- lib/Transforms/Utils/LowerSwitch.cpp | 6 +-- lib/Transforms/Utils/SimplifyIndVar.cpp | 8 ++-- lib/Transforms/Vectorize/SLPVectorizer.cpp | 14 +++---- 63 files changed, 175 insertions(+), 173 deletions(-) diff --git a/include/llvm/Analysis/TargetLibraryInfo.h b/include/llvm/Analysis/TargetLibraryInfo.h index 98e450c867d..7becdf033dd 100644 --- a/include/llvm/Analysis/TargetLibraryInfo.h +++ b/include/llvm/Analysis/TargetLibraryInfo.h @@ -42,7 +42,7 @@ class PreservedAnalyses; /// /// This class constructs tables that hold the target library 
information and /// make it available. However, it is somewhat expensive to compute and only -/// depends on the triple. So users typicaly interact with the \c +/// depends on the triple. So users typically interact with the \c /// TargetLibraryInfo wrapper below. class TargetLibraryInfoImpl { friend class TargetLibraryInfo; diff --git a/include/llvm/CodeGen/Analysis.h b/include/llvm/CodeGen/Analysis.h index 82d1e8ada17..51d423ef968 100644 --- a/include/llvm/CodeGen/Analysis.h +++ b/include/llvm/CodeGen/Analysis.h @@ -37,7 +37,7 @@ struct EVT; /// Given an LLVM IR aggregate type and a sequence of insertvalue or /// extractvalue indices that identify a member, return the linearized index of /// the start of the member, i.e the number of element in memory before the -/// seeked one. This is disconnected from the number of bytes. +/// sought one. This is disconnected from the number of bytes. /// /// \param Ty is the type indexed by \p Indices. /// \param Indices is an optional pointer in the indices list to the current diff --git a/include/llvm/CodeGen/RegAllocPBQP.h b/include/llvm/CodeGen/RegAllocPBQP.h index 6046e46547b..4122811a9e5 100644 --- a/include/llvm/CodeGen/RegAllocPBQP.h +++ b/include/llvm/CodeGen/RegAllocPBQP.h @@ -134,7 +134,7 @@ inline hash_code hash_value(const AllowedRegVector &OptRegs) { hash_combine_range(OStart, OEnd)); } -/// \brief Holds graph-level metadata relevent to PBQP RA problems. +/// \brief Holds graph-level metadata relevant to PBQP RA problems. class GraphMetadata { private: typedef ValuePool AllowedRegVecPool; diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h index a88c16f2621..98d5c2e2f7c 100644 --- a/include/llvm/IR/DebugInfoMetadata.h +++ b/include/llvm/IR/DebugInfoMetadata.h @@ -1389,7 +1389,7 @@ public: void replaceFunction(std::nullptr_t) { replaceOperandWith(7, nullptr); } /// @} - /// \brief Check if this subprogram decribes the given function. + /// \brief Check if this subprogram describes the given function. /// /// FIXME: Should this be looking through bitcasts? bool describes(const Function *F) const; @@ -1943,7 +1943,7 @@ public: /// \brief Check that a location is valid for this variable. /// /// Check that \c DL exists, is in the same subprogram, and has the same - /// inlined-at location as \c this. (Otherwise, it's not a valid attachemnt + /// inlined-at location as \c this. (Otherwise, it's not a valid attachment /// to a \a DbgInfoIntrinsic.) bool isValidLocationForIntrinsic(const DILocation *DL) const { return DL && getScope()->getSubprogram() == DL->getScope()->getSubprogram(); diff --git a/include/llvm/ProfileData/CoverageMapping.h b/include/llvm/ProfileData/CoverageMapping.h index 3488e793d84..71813c84c33 100644 --- a/include/llvm/ProfileData/CoverageMapping.h +++ b/include/llvm/ProfileData/CoverageMapping.h @@ -104,7 +104,7 @@ struct CounterExpression { }; /// \brief A Counter expression builder is used to construct the -/// counter expressions. It avoids unecessary duplication +/// counter expressions. It avoids unnecessary duplication /// and simplifies algebraic expressions. class CounterExpressionBuilder { /// \brief A list of all the counter expressions @@ -443,7 +443,7 @@ public: /// \brief Get the list of function instantiations in the file. /// - /// Fucntions that are instantiated more than once, such as C++ template + /// Functions that are instantiated more than once, such as C++ template /// specializations, have distinct coverage records for each instantiation. 
std::vector getInstantiations(StringRef Filename); diff --git a/include/llvm/Support/Options.h b/include/llvm/Support/Options.h index 2742d3907c6..7b61b2308f5 100644 --- a/include/llvm/Support/Options.h +++ b/include/llvm/Support/Options.h @@ -71,7 +71,7 @@ private: /// \param Key unique key for option /// \param O option to map to \p Key /// - /// Allocated cl::Options are owened by the OptionRegistry and are deallocated + /// Allocated cl::Options are owned by the OptionRegistry and are deallocated /// on destruction or removal void addOption(void *Key, cl::Option *O); @@ -91,7 +91,7 @@ public: /// Options are keyed off the template parameters to generate unique static /// characters. The template parameters are (1) the type of the data the /// option stores (\p ValT), the class that will read the option (\p Base), - /// and the memeber that the class will store the data into (\p Mem). + /// and the member that the class will store the data into (\p Mem). template static void registerOption(const char *ArgStr, const char *Desc, const ValT &InitValue) { diff --git a/include/llvm/Support/ScaledNumber.h b/include/llvm/Support/ScaledNumber.h index 0a4262b7eec..c6421efc8b4 100644 --- a/include/llvm/Support/ScaledNumber.h +++ b/include/llvm/Support/ScaledNumber.h @@ -282,7 +282,7 @@ int compare(DigitsT LDigits, int16_t LScale, DigitsT RDigits, int16_t RScale) { /// /// As a convenience, returns the matching scale. If the output value of one /// number is zero, returns the scale of the other. If both are zero, which -/// scale is returned is unspecifed. +/// scale is returned is unspecified. template int16_t matchScales(DigitsT &LDigits, int16_t &LScale, DigitsT &RDigits, int16_t &RScale) { @@ -334,7 +334,7 @@ std::pair getSum(DigitsT LDigits, int16_t LScale, DigitsT RDigits, int16_t RScale) { static_assert(!std::numeric_limits::is_signed, "expected unsigned"); - // Check inputs up front. This is only relevent if addition overflows, but + // Check inputs up front. This is only relevant if addition overflows, but // testing here should catch more bugs. assert(LScale < INT16_MAX && "scale too large"); assert(RScale < INT16_MAX && "scale too large"); diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h index 28e512c8694..21f8977b7b3 100644 --- a/include/llvm/Support/raw_ostream.h +++ b/include/llvm/Support/raw_ostream.h @@ -246,7 +246,7 @@ public: /// outputting colored text, or before program exit. virtual raw_ostream &resetColor() { return *this; } - /// Reverses the forground and background colors. + /// Reverses the foreground and background colors. virtual raw_ostream &reverseColor() { return *this; } /// This function determines if this stream is connected to a "tty" or @@ -316,7 +316,7 @@ private: }; /// An abstract base class for streams implementations that also support a -/// pwrite operation. This is usefull for code that can mostly stream out data, +/// pwrite operation. This is useful for code that can mostly stream out data, /// but needs to patch in a header that needs to know the output size. 
class raw_pwrite_stream : public raw_ostream { virtual void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) = 0; diff --git a/include/llvm/Target/TargetOpcodes.h b/include/llvm/Target/TargetOpcodes.h index 50197191109..a7e2aa82a28 100644 --- a/include/llvm/Target/TargetOpcodes.h +++ b/include/llvm/Target/TargetOpcodes.h @@ -126,7 +126,7 @@ enum { /// Loading instruction that may page fault, bundled with associated /// information on how to handle such a page fault. It is intended to support /// "zero cost" null checks in managed languages by allowing LLVM to fold - /// comparisions into existing memory operations. + /// comparisons into existing memory operations. FAULTING_LOAD_OP = 22, }; } // end namespace TargetOpcode diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h index 1550de01b3f..0137e4e27f7 100644 --- a/include/llvm/Transforms/Utils/LoopUtils.h +++ b/include/llvm/Transforms/Utils/LoopUtils.h @@ -125,7 +125,7 @@ public: static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind, InstDesc &Prev, bool HasFunNoNaNAttr); - /// Returns true if instuction I has multiple uses in Insts + /// Returns true if instruction I has multiple uses in Insts static bool hasMultipleUsesOf(Instruction *I, SmallPtrSetImpl &Insts); @@ -253,7 +253,7 @@ bool promoteLoopAccessesToScalars(AliasSet &, SmallVectorImpl &, LICMSafetyInfo *); /// \brief Computes safety information for a loop -/// checks loop body & header for the possiblity of may throw +/// checks loop body & header for the possibility of may throw /// exception, it takes LICMSafetyInfo and loop as argument. /// Updates safety information in LICMSafetyInfo argument. void computeLICMSafetyInfo(LICMSafetyInfo *, Loop *); diff --git a/lib/Analysis/CFLAliasAnalysis.cpp b/lib/Analysis/CFLAliasAnalysis.cpp index b76cbb490cf..e87adf14285 100644 --- a/lib/Analysis/CFLAliasAnalysis.cpp +++ b/lib/Analysis/CFLAliasAnalysis.cpp @@ -622,7 +622,7 @@ public: // ----- Various Edge iterators for the graph ----- // // \brief Iterator for edges. Because this graph is bidirected, we don't - // allow modificaiton of the edges using this iterator. Additionally, the + // allow modification of the edges using this iterator. Additionally, the // iterator becomes invalid if you add edges to or from the node you're // getting the edges of. struct EdgeIterator : public std::iteratorgetOpcode() != Instruction::ICmp && CE->getOpcode() != Instruction::FCmp; diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp index fee2a2d0d18..fe2a2a5005e 100644 --- a/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/lib/Analysis/ScalarEvolutionExpander.cpp @@ -1002,7 +1002,7 @@ static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist, } /// \brief Check whether we can cheaply express the requested SCEV in terms of -/// the available PHI SCEV by truncation and/or invertion of the step. +/// the available PHI SCEV by truncation and/or inversion of the step. static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, @@ -1836,7 +1836,7 @@ bool SCEVExpander::isHighCostExpansionHelper( if (auto *UDivExpr = dyn_cast(S)) { // If the divisor is a power of two and the SCEV type fits in a native - // integer, consider the divison cheap irrespective of whether it occurs in + // integer, consider the division cheap irrespective of whether it occurs in // the user code since it can be lowered into a right shift. 
if (auto *SC = dyn_cast(UDivExpr->getRHS())) if (SC->getValue()->getValue().isPowerOf2()) { diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index 275e35992fb..eaf79a74fcb 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -43,7 +43,7 @@ const unsigned MaxDepth = 6; /// Enable an experimental feature to leverage information about dominating /// conditions to compute known bits. The individual options below control how -/// hard we search. The defaults are choosen to be fairly aggressive. If you +/// hard we search. The defaults are chosen to be fairly aggressive. If you /// run into compile time problems when testing, scale them back and report /// your findings. static cl::opt EnableDomConditions("value-tracking-dom-conditions", @@ -2545,7 +2545,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef idx_range, } // This insert value inserts something else than what we are looking for. - // See if the (aggregrate) value inserted into has the value we are + // See if the (aggregate) value inserted into has the value we are // looking for, then. if (*req_idx != *i) return FindInsertedValue(I->getAggregateOperand(), idx_range, @@ -2560,7 +2560,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef idx_range, } if (ExtractValueInst *I = dyn_cast(V)) { - // If we're extracting a value from an aggregrate that was extracted from + // If we're extracting a value from an aggregate that was extracted from // something else, we can extract from that something else directly instead. // However, we will need to chain I's indices with the requested indices. diff --git a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp index 6610ac78f8c..8b1bea8049e 100644 --- a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp +++ b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp @@ -253,7 +253,7 @@ void WinCodeViewLineTables::emitDebugInfoForFunction(const Function *GV) { } FilenameSegmentLengths[LastSegmentEnd] = FI.Instrs.size() - LastSegmentEnd; - // Emit a line table subsection, requred to do PC-to-file:line lookup. + // Emit a line table subsection, required to do PC-to-file:line lookup. Asm->OutStreamer->AddComment("Line table subsection for " + Twine(FuncName)); Asm->EmitInt32(COFF::DEBUG_LINE_TABLE_SUBSECTION); MCSymbol *LineTableBegin = Asm->MMI->getContext().createTempSymbol(), diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp index 6e7f525736d..3768807702b 100644 --- a/lib/CodeGen/CodeGenPrepare.cpp +++ b/lib/CodeGen/CodeGenPrepare.cpp @@ -1089,7 +1089,7 @@ static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, // ScalarizeMaskedLoad() translates masked load intrinsic, like // <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align, // <16 x i1> %mask, <16 x i32> %passthru) -// to a chain of basic blocks, whith loading element one-by-one if +// to a chain of basic blocks, with loading element one-by-one if // the appropriate mask bit is set // // %1 = bitcast i8* %addr to i32* @@ -4135,7 +4135,7 @@ class VectorPromoteHelper { /// \brief Generate a constant vector with \p Val with the same /// number of elements as the transition. /// \p UseSplat defines whether or not \p Val should be replicated - /// accross the whole vector. + /// across the whole vector. 
+ /// across the whole vector.
/// In other words, if UseSplat == true, we generate , /// otherwise we generate a vector with as many undef as possible: /// where \p Val is only diff --git a/lib/CodeGen/GlobalMerge.cpp b/lib/CodeGen/GlobalMerge.cpp index 04c97f6796c..d477143d62d 100644 --- a/lib/CodeGen/GlobalMerge.cpp +++ b/lib/CodeGen/GlobalMerge.cpp @@ -211,7 +211,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl &Globals, // If we want to be smarter, look at all uses of each global, to try to // discover all sets of globals used together, and how many times each of - // these sets occured. + // these sets occurred. // // Keep this reasonably efficient, by having an append-only list of all sets // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp index ef213e4347f..21d603b19ef 100644 --- a/lib/CodeGen/PeepholeOptimizer.cpp +++ b/lib/CodeGen/PeepholeOptimizer.cpp @@ -327,7 +327,7 @@ namespace { /// \brief Following the use-def chain, get the next available source /// for the tracked value. - /// \return A ValueTrackerResult containing the a set of registers + /// \return A ValueTrackerResult containing a set of registers /// and sub registers with tracked values. A ValueTrackerResult with /// an empty set of registers means no source was found. ValueTrackerResult getNextSource(); @@ -1567,7 +1567,7 @@ ValueTrackerResult ValueTracker::getNextSource() { Res.setInst(Def); // If we can still move up in the use-def chain, move to the next - // defintion. + // definition. if (!TargetRegisterInfo::isPhysicalRegister(Reg) && OneRegSrc) { Def = MRI.getVRegDef(Reg); DefIdx = MRI.def_begin(Reg).getOperandNo(); diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp index c911b9b47ea..292e836c495 100644 --- a/lib/CodeGen/RegisterCoalescer.cpp +++ b/lib/CodeGen/RegisterCoalescer.cpp @@ -1822,7 +1822,7 @@ public: /// Removes subranges starting at copies that get removed. This sometimes /// happens when undefined subranges are copied around. These ranges contain - /// no usefull information and can be removed. + /// no useful information and can be removed. void pruneSubRegValues(LiveInterval &LI, unsigned &ShrinkMask); /// Erase any machine instructions that have been coalesced away. @@ -2410,7 +2410,7 @@ void JoinVals::pruneSubRegValues(LiveInterval &LI, unsigned &ShrinkMask) continue; } // If a subrange ends at the copy, then a value was copied but only - // partially used later. Shrink the subregister range apropriately. + // partially used later. Shrink the subregister range appropriately. if (Q.valueIn() != nullptr && Q.valueOut() == nullptr) { DEBUG(dbgs() << "\t\tDead uses at sublane " << format("%04X", S.LaneMask) << " at " << Def << "\n"); @@ -2799,7 +2799,7 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const { !isTerminalReg(DstReg, Copy, MRI)) return false; - // DstReg is a terminal node. Check if it inteferes with any other + // DstReg is a terminal node. Check if it interferes with any other // copy involving SrcReg. 
const MachineBasicBlock *OrigBB = Copy.getParent(); const LiveInterval &DstLI = LIS->getInterval(DstReg); diff --git a/lib/CodeGen/RegisterPressure.cpp b/lib/CodeGen/RegisterPressure.cpp index c3786e552a1..3456bb6f9f8 100644 --- a/lib/CodeGen/RegisterPressure.cpp +++ b/lib/CodeGen/RegisterPressure.cpp @@ -966,7 +966,7 @@ void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) { /// This is expensive for an on-the-fly query because it calls /// bumpDownwardPressure to recompute the pressure sets based on current /// liveness. We don't yet have a fast version of downward pressure tracking -/// analagous to getUpwardPressureDelta. +/// analogous to getUpwardPressureDelta. void RegPressureTracker:: getMaxDownwardPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta, ArrayRef CriticalPSets, diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 9c07f5be8b6..7ce2e0d3beb 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -508,7 +508,7 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI, return false; } -/// This returns true if the two MIs need a chain edge betwee them. +/// This returns true if the two MIs need a chain edge between them. /// If these are not even memory operations, we still may need /// chain deps between them. The question really is - could /// these two MIs be reordered during scheduling from memory dependency @@ -670,7 +670,7 @@ static inline void addChainDependency(AliasAnalysis *AA, unsigned TrueMemOrderLatency = 0, bool isNormalMemory = false) { // If this is a false dependency, - // do not add the edge, but rememeber the rejected node. + // do not add the edge, but remember the rejected node. if (MIsNeedChainEdge(AA, MFI, DL, SUa->getInstr(), SUb->getInstr())) { SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier); Dep.setLatency(TrueMemOrderLatency); @@ -685,7 +685,7 @@ static inline void addChainDependency(AliasAnalysis *AA, } } -/// Create an SUnit for each real instruction, numbered in top-down toplological +/// Create an SUnit for each real instruction, numbered in top-down topological /// order. The instruction order A < B, implies that no edge exists from B to A. /// /// Map each real instruction to its SUnit. @@ -766,7 +766,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, // We build scheduling units by walking a block's instruction list from bottom // to top. - // Remember where a generic side-effecting instruction is as we procede. + // Remember where a generic side-effecting instruction is as we proceed. SUnit *BarrierChain = nullptr, *AliasChain = nullptr; // Memory references to specific known memory locations are tracked diff --git a/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp b/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp index 6303422b9ae..622e06f0da2 100644 --- a/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp +++ b/lib/CodeGen/SelectionDAG/ResourcePriorityQueue.cpp @@ -49,7 +49,7 @@ ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS) TII = STI.getInstrInfo(); ResourcesModel.reset(TII->CreateTargetScheduleState(STI)); // This hard requirement could be relaxed, but for now - // do not let it procede. + // do not let it proceed. 
+ // do not let it proceed.
assert(ResourcesModel && "Unimplemented CreateTargetScheduleState."); unsigned NumRC = TRI->getNumRegClasses(); @@ -269,12 +269,12 @@ bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) { } // Now see if there are no other dependencies - // to instructions alredy in the packet. + // to instructions already in the packet. for (unsigned i = 0, e = Packet.size(); i != e; ++i) for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(), E = Packet[i]->Succs.end(); I != E; ++I) { // Since we do not add pseudos to packets, might as well - // ignor order deps. + // ignore order deps. if (I->isCtrl()) continue; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 8a382270b82..61eb4cbbfd1 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2677,7 +2677,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{ const int rIndex = Items - 1 - cast(Op.getOperand(1))->getZExtValue(); - // If the sign portion ends in our element the substraction gives correct + // If the sign portion ends in our element the subtraction gives correct // result. Otherwise it gives either negative or > bitwidth result return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0); } diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 99a74cdb8e6..e31ba326d09 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -3069,7 +3069,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) { void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) { SDLoc sdl = getCurSDLoc(); - // llvm.masked.store.*(Src0, Ptr, alignemt, Mask) + // llvm.masked.store.*(Src0, Ptr, alignment, Mask) Value *PtrOperand = I.getArgOperand(1); SDValue Ptr = getValue(PtrOperand); SDValue Src0 = getValue(I.getArgOperand(0)); @@ -3094,13 +3094,13 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) { } // Gather/scatter receive a vector of pointers. -// This vector of pointers may be represented as a base pointer + vector of -// indices, it depends on GEP and instruction preceeding GEP +// This vector of pointers may be represented as a base pointer + vector of +// indices, it depends on GEP and instruction preceding GEP // that calculates indices static bool getUniformBase(Value *& Ptr, SDValue& Base, SDValue& Index, SelectionDAGBuilder* SDB) { - assert (Ptr->getType()->isVectorTy() && "Uexpected pointer type"); + assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); GetElementPtrInst *Gep = dyn_cast(Ptr); if (!Gep || Gep->getNumOperands() > 2) return false; diff --git a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp index c62428d308e..7eb8728e1fc 100644 --- a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -105,8 +105,8 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType, return Builder.DAG.getFrameIndex(FI, ValueType); } // Note: We deliberately choose to advance this only on the failing path. - // Doing so on the suceeding path involes a bit of complexity that caused a - // minor bug previously. Unless performance shows this matters, please + // Doing so on the succeeding path involves a bit of complexity that caused + // a minor bug previously. Unless performance shows this matters, please // keep this code as simple as possible. 
NextSlotToAllocate++; } @@ -119,7 +119,7 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType, static Optional findPreviousSpillSlot(const Value *Val, SelectionDAGBuilder &Builder, int LookUpDepth) { - // Can not look any futher - give up now + // Can not look any further - give up now if (LookUpDepth <= 0) return Optional(); @@ -196,7 +196,7 @@ static Optional findPreviousSpillSlot(const Value *Val, /// Try to find existing copies of the incoming values in stack slots used for /// statepoint spilling. If we can find a spill slot for the incoming value, /// mark that slot as allocated, and reuse the same slot for this safepoint. -/// This helps to avoid series of loads and stores that only serve to resuffle +/// This helps to avoid series of loads and stores that only serve to reshuffle /// values on the stack between calls. static void reservePreviousStackSlotForValue(const Value *IncomingValue, SelectionDAGBuilder &Builder) { @@ -255,7 +255,7 @@ static void removeDuplicatesGCPtrs(SmallVectorImpl &Bases, SmallVectorImpl &Relocs, SelectionDAGBuilder &Builder) { - // This is horribly ineffecient, but I don't care right now + // This is horribly inefficient, but I don't care right now SmallSet Seen; SmallVector NewBases, NewPtrs, NewRelocs; @@ -347,11 +347,11 @@ lowerCallFromStatepoint(ImmutableStatepoint ISP, MachineBasicBlock *LandingPad, if (CS.isInvoke()) { // Result value will be used in different basic block for invokes // so we need to export it now. But statepoint call has a different type - // than the actuall call. It means that standart exporting mechanism will + // than the actual call. It means that standard exporting mechanism will // create register of the wrong type. So instead we need to create // register with correct type and save value into it manually. // TODO: To eliminate this problem we can remove gc.result intrinsics - // completelly and make statepoint call to return a tuple. + // completely and make statepoint call to return a tuple. unsigned Reg = Builder.FuncInfo.CreateRegs(ISP.getActualReturnType()); RegsForValue RFV( *Builder.DAG.getContext(), Builder.DAG.getTargetLoweringInfo(), @@ -597,16 +597,17 @@ static void lowerStatepointMetaArgs(SmallVectorImpl &Ops, SpillMap[V] = cast(Loc)->getIndex(); } else { // Record value as visited, but not spilled. This is case for allocas - // and constants. For this values we can avoid emiting spill load while + // and constants. For this values we can avoid emitting spill load while // visiting corresponding gc_relocate. // Actually we do not need to record them in this map at all. - // We do this only to check that we are not relocating any unvisited value. + // We do this only to check that we are not relocating any unvisited + // value. SpillMap[V] = None; // Default llvm mechanisms for exporting values which are used in // different basic blocks does not work for gc relocates. // Note that it would be incorrect to teach llvm that all relocates are - // uses of the corresponging values so that it would automatically + // uses of the corresponding values so that it would automatically // export them. Relocates of the spilled values does not use original // value. 
if (StatepointSite.getCallSite().isInvoke()) @@ -806,7 +807,7 @@ void SelectionDAGBuilder::LowerStatepoint( // Replace original call DAG.ReplaceAllUsesWith(CallNode, SinkNode); // This may update Root - // Remove originall call node + // Remove original call node DAG.DeleteNode(CallNode); // DON'T set the root - under the assumption that it's already set past the @@ -878,7 +879,7 @@ void SelectionDAGBuilder::visitGCRelocate(const CallInst &CI) { // Be conservative: flush all pending loads // TODO: Probably we can be less restrictive on this, - // it may allow more scheduling opprtunities + // it may allow more scheduling opportunities. SDValue Chain = getRoot(); SDValue SpillLoad = diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp index b4383cca1b9..3b205d0760e 100644 --- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -705,7 +705,7 @@ MCSymbol *TargetLoweringObjectFileMachO::getCFIPersonalitySymbol( const MCExpr *TargetLoweringObjectFileMachO::getIndirectSymViaGOTPCRel( const MCSymbol *Sym, const MCValue &MV, int64_t Offset, MachineModuleInfo *MMI, MCStreamer &Streamer) const { - // Although MachO 32-bit targets do not explictly have a GOTPCREL relocation + // Although MachO 32-bit targets do not explicitly have a GOTPCREL relocation // as 64-bit do, we replace the GOT equivalent by accessing the final symbol // through a non_lazy_ptr stub instead. One advantage is that it allows the // computation of deltas to final external symbols. Example: diff --git a/lib/CodeGen/WinEHPrepare.cpp b/lib/CodeGen/WinEHPrepare.cpp index 0d26ed333ca..ff52421fb52 100644 --- a/lib/CodeGen/WinEHPrepare.cpp +++ b/lib/CodeGen/WinEHPrepare.cpp @@ -2146,7 +2146,7 @@ void WinEHPrepare::mapLandingPadBlocks(LandingPadInst *LPad, // Under some circumstances optimized IR will flow unconditionally into a // handler block without checking the selector. This can only happen if // the landing pad has a catch-all handler and the handler for the - // preceeding catch clause is identical to the catch-call handler + // preceding catch clause is identical to the catch-call handler // (typically an empty catch). In this case, the handler must be shared // by all remaining clauses. if (isa( diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp index 4316934df04..2e7bcaf96c0 100644 --- a/lib/DebugInfo/DWARF/DWARFContext.cpp +++ b/lib/DebugInfo/DWARF/DWARFContext.cpp @@ -705,8 +705,8 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj, // (Address of Section in File) + // (Load Address of Section) if (L != nullptr && RSec != Obj.section_end()) { - // RSec is now either the section being targetted or the section - // containing the symbol being targetted. In either case, + // RSec is now either the section being targeted or the section + // containing the symbol being targeted. In either case, // we need to perform the same computation. StringRef SecName; RSec->getName(SecName); diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp index 16612c146dd..66c4d459343 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp @@ -1476,11 +1476,11 @@ relocation_iterator RuntimeDyldELF::processRelocationRef( // These relocations are supposed to subtract the TOC address from // the final value. 
This does not fit cleanly into the RuntimeDyld // scheme, since there may be *two* sections involved in determining - // the relocation value (the section of the symbol refered to by the + // the relocation value (the section of the symbol referred to by the // relocation, and the TOC section associated with the current module). // // Fortunately, these relocations are currently only ever generated - // refering to symbols that themselves reside in the TOC, which means + // referring to symbols that themselves reside in the TOC, which means // that the two sections are actually the same. Thus they cancel out // and we can immediately resolve the relocation right now. switch (RelType) { diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp index 4f4e9850061..76b3b3103c1 100644 --- a/lib/IR/AsmWriter.cpp +++ b/lib/IR/AsmWriter.cpp @@ -3327,7 +3327,7 @@ void Value::print(raw_ostream &ROS, ModuleSlotTracker &MST) const { /// Print without a type, skipping the TypePrinting object. /// -/// \return \c true iff printing was succesful. +/// \return \c true iff printing was successful. static bool printWithoutType(const Value &V, raw_ostream &O, SlotTracker *Machine, const Module *M) { if (V.hasName() || isa(V) || diff --git a/lib/MC/MCELFStreamer.cpp b/lib/MC/MCELFStreamer.cpp index fe9ac21e17f..7588ea38823 100644 --- a/lib/MC/MCELFStreamer.cpp +++ b/lib/MC/MCELFStreamer.cpp @@ -134,7 +134,7 @@ void MCELFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) { llvm_unreachable("invalid assembler flag!"); } -// If bundle aligment is used and there are any instructions in the section, it +// If bundle alignment is used and there are any instructions in the section, it // needs to be aligned to at least the bundle size. static void setSectionAlignmentForBundling(const MCAssembler &Assembler, MCSection *Section) { @@ -603,7 +603,7 @@ void MCELFStreamer::EmitBundleUnlock() { report_fatal_error("Empty bundle-locked group is forbidden"); // When the -mc-relax-all flag is used, we emit instructions to fragments - // stored on a stack. When the bundle unlock is emited, we pop a fragment + // stored on a stack. When the bundle unlock is emitted, we pop a fragment // from the stack a merge it to the one below. if (getAssembler().getRelaxAll()) { assert(!BundleGroups.empty() && "There are no bundle groups"); diff --git a/lib/Target/AArch64/AArch64A53Fix835769.cpp b/lib/Target/AArch64/AArch64A53Fix835769.cpp index 398269433ec..6c214f6b411 100644 --- a/lib/Target/AArch64/AArch64A53Fix835769.cpp +++ b/lib/Target/AArch64/AArch64A53Fix835769.cpp @@ -216,8 +216,8 @@ AArch64A53Fix835769::runOnBasicBlock(MachineBasicBlock &MBB) { ++Idx; } - DEBUG(dbgs() << "Scan complete, "<< Sequences.size() - << " occurences of pattern found.\n"); + DEBUG(dbgs() << "Scan complete, " << Sequences.size() + << " occurrences of pattern found.\n"); // Then update the basic block, inserting nops between the detected sequences. for (auto &MI : Sequences) { diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp index ab24fe4454e..eaa4173e33f 100644 --- a/lib/Target/AArch64/AArch64FastISel.cpp +++ b/lib/Target/AArch64/AArch64FastISel.cpp @@ -969,7 +969,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) { // Cannot encode an offset register and an immediate offset in the same // instruction. Fold the immediate offset into the load/store instruction and - // emit an additonal add to take care of the offset register. + // emit an additional add to take care of the offset register. 
+ // emit an additional add to take care of the offset register.
if (!ImmediateOffsetNeedsLowering && Addr.getOffset() && Addr.getOffsetReg()) RegisterOffsetNeedsLowering = true; diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp index f43fb23b937..5b8db3d157f 100644 --- a/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -72,9 +72,9 @@ // // For most functions, some of the frame areas are empty. For those functions, // it may not be necessary to set up fp or bp: -// * A base pointer is definitly needed when there are both VLAs and local +// * A base pointer is definitely needed when there are both VLAs and local // variables with more-than-default alignment requirements. -// * A frame pointer is definitly needed when there are local variables with +// * A frame pointer is definitely needed when there are local variables with // more-than-default alignment requirements. // // In some cases when a base pointer is not strictly needed, it is generated diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index fd8ca209c73..3eecaaaabcf 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1466,7 +1466,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, // cmn w0, #1 // Fundamental, we're relying on the property that (zext LHS) == (zext RHS) // if and only if (sext LHS) == (sext RHS). The checks are in place to - // ensure both the LHS and RHS are truely zero extended and to make sure the + // ensure both the LHS and RHS are truly zero extended and to make sure the // transformation is profitable. if ((RHSC->getZExtValue() >> 16 == 0) && isa(LHS) && cast(LHS)->getExtensionType() == ISD::ZEXTLOAD && @@ -8385,7 +8385,7 @@ static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St) { unsigned Alignment = std::min(OrigAlignment, EltOffset); // Create scalar stores. This is at least as good as the code sequence for a - // split unaligned store wich is a dup.s, ext.b, and two stores. + // split unaligned store which is a dup.s, ext.b, and two stores. // Most of the time the three stores should be replaced by store pair // instructions (stp). SDLoc DL(St); diff --git a/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp b/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp index 0c54446b0fb..7fed3afe843 100644 --- a/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp +++ b/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp @@ -42,7 +42,7 @@ /// ENDIF /// %use /// -/// Adding this use will make the def live thoughout the IF branch, which is +/// Adding this use will make the def live throughout the IF branch, which is /// what we want. #include "AMDGPU.h" @@ -138,7 +138,7 @@ bool SIFixSGPRLiveRanges::runOnMachineFunction(MachineFunction &MF) { if (MBB.succ_size() < 2) continue; - // We have structured control flow, so number of succesors should be two. + // We have structured control flow, so number of successors should be two. 
+ // We have structured control flow, so number of successors should be two.
assert(MBB.succ_size() == 2); MachineBasicBlock *SuccA = *MBB.succ_begin(); MachineBasicBlock *SuccB = *(++MBB.succ_begin()); diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp index 3d4fbb9fcaa..b90ba886bdd 100644 --- a/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/lib/Target/AMDGPU/SIISelLowering.cpp @@ -548,7 +548,7 @@ SDValue SITargetLowering::LowerFormalArguments( assert((PSInputNum <= 15) && "Too many PS inputs!"); if (!Arg.Used) { - // We can savely skip PS inputs + // We can safely skip PS inputs Skipped.set(i); ++PSInputNum; continue; @@ -565,7 +565,7 @@ SDValue SITargetLowering::LowerFormalArguments( // We REALLY want the ORIGINAL number of vertex elements here, e.g. a // three or five element vertex only needs three or five registers, - // NOT four or eigth. + // NOT four or eight. Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); unsigned NumElements = ParamType->getVectorNumElements(); @@ -2248,9 +2248,9 @@ MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, } /// \brief Return a resource descriptor with the 'Add TID' bit enabled -/// The TID (Thread ID) is multipled by the stride value (bits [61:48] -/// of the resource descriptor) to create an offset, which is added to the -/// resource ponter. +/// The TID (Thread ID) is multiplied by the stride value (bits [61:48] +/// of the resource descriptor) to create an offset, which is added to +/// the resource pointer. MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, SDLoc DL, SDValue Ptr, diff --git a/lib/Target/AMDGPU/SIInsertWaits.cpp b/lib/Target/AMDGPU/SIInsertWaits.cpp index 90a37f17468..df76b457af3 100644 --- a/lib/Target/AMDGPU/SIInsertWaits.cpp +++ b/lib/Target/AMDGPU/SIInsertWaits.cpp @@ -261,7 +261,7 @@ void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB, if (MBB.getParent()->getSubtarget().getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { - // Any occurence of consecutive VMEM or SMEM instructions forms a VMEM + // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM // or SMEM clause, respectively. // // The temporary workaround is to break the clauses with S_NOP. diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp index 68c59a1f6ca..bfd765aa5fe 100644 --- a/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -1088,7 +1088,7 @@ bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa, // TODO: Should we check the address space from the MachineMemOperand? That // would allow us to distinguish objects we know don't alias based on the - // underlying addres space, even if it was lowered to a different one, + // underlying address space, even if it was lowered to a different one, // e.g. private accesses lowered to use MUBUF instructions on a scratch // buffer. if (isDS(Opc0)) { diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index f3a6027107c..b6a2f7fa0d5 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -2058,7 +2058,7 @@ bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI, /// \brief Returns whether CPEMI is the first instruction in the block /// immediately following JTMI (assumed to be a TBB or TBH terminator). If so, /// we can switch the first register to PC and usually remove the address -/// calculation that preceeded it. +/// calculation that preceded it. 
+/// calculation that preceded it.
static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) { MachineFunction::iterator MBB = JTMI->getParent(); MachineFunction *MF = MBB->getParent(); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index ea0dd6b2a1c..6b7fea400d4 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -609,7 +609,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, setTargetDAGCombine(ISD::ADDC); if (Subtarget->isFPOnlySP()) { - // When targetting a floating-point unit with only single-precision + // When targeting a floating-point unit with only single-precision // operations, f64 is legal for the few double-precision instructions which // are present However, no double-precision operations other than moves, // loads and stores are provided by the hardware. @@ -11597,7 +11597,7 @@ bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx, return false; // Floating point values and vector values map to the same register file. - // Therefore, althought we could do a store extract of a vector type, this is + // Therefore, although we could do a store extract of a vector type, this is // better to leave at float as we have more freedom in the addressing mode for // those. if (VectorTy->isFPOrFPVectorTy()) diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp index 6905c4f6d12..d9675b5173d 100644 --- a/lib/Target/Hexagon/HexagonGenPredicate.cpp +++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp @@ -250,7 +250,7 @@ Register HexagonGenPredicate::getPredRegFor(const Register &Reg) { unsigned NewPR = MRI->createVirtualRegister(PredRC); // For convertible instructions, do not modify them, so that they can - // be coverted later. Generate a copy from Reg to NewPR. + // be converted later. Generate a copy from Reg to NewPR. if (isConvertibleToPredForm(DefI)) { MachineBasicBlock::iterator DefIt = DefI; BuildMI(B, std::next(DefIt), DL, TII->get(TargetOpcode::COPY), NewPR) diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 53b6bf617e8..e328880c9db 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -727,9 +727,9 @@ CountValue *HexagonHardwareLoops::computeCount(MachineLoop *Loop, // Phis that may feed into the loop. LoopFeederMap LoopFeederPhi; - // Check if the inital value may be zero and can be decremented in the first + // Check if the initial value may be zero and can be decremented in the first // iteration. If the value is zero, the endloop instruction will not decrement - // the loop counter, so we shoudn't generate a hardware loop in this case. + // the loop counter, so we shouldn't generate a hardware loop in this case. if (loopCountMayWrapOrUnderFlow(Start, End, Loop->getLoopPreheader(), Loop, LoopFeederPhi)) return nullptr; @@ -1440,7 +1440,7 @@ bool HexagonHardwareLoops::loopCountMayWrapOrUnderFlow( if (Comparison::isSigned(Cmp)) return false; - // Check if there is a comparison of the inital value. If the initial value + // Check if there is a comparison of the initial value. If the initial value // is greater than or not equal to another value, then assume this is a // range check. 
if ((Cmp & Comparison::G) || Cmp == Comparison::NE) diff --git a/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp b/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp index 08981b9d49a..b442fc03b25 100644 --- a/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp +++ b/lib/Target/MSP430/MSP430MachineFunctionInfo.cpp @@ -1,4 +1,4 @@ -//===-- MSP430MachineFucntionInfo.cpp - MSP430 machine function info ------===// +//===-- MSP430MachineFunctionInfo.cpp - MSP430 machine function info ------===// // // The LLVM Compiler Infrastructure // diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index b3f85fd4e61..5f46620e077 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -3023,7 +3023,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, // We ought to be able to use LocVT directly but O32 sets it to i32 // when allocating floating point values to integer registers. // This shouldn't influence how we load the value into registers unless - // we are targetting softfloat. + // we are targeting softfloat. if (VA.getValVT().isFloatingPoint() && !Subtarget.useSoftFloat()) LocVT = VA.getValVT(); } diff --git a/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp b/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp index 69a229e32f4..781900a2e79 100644 --- a/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp +++ b/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp @@ -98,7 +98,7 @@ private: /// This reordering exposes to optimizeMemoryInstruction more /// optimization opportunities on loads and stores. /// - /// If this function succesfully hoists an eliminable addrspacecast or V is + /// If this function successfully hoists an eliminable addrspacecast or V is /// already such an addrspacecast, it returns the transformed value (which is /// guaranteed to be an addrspacecast); otherwise, it returns nullptr. Value *hoistAddrSpaceCastFrom(Value *V, int Depth = 0); diff --git a/lib/Target/NVPTX/NVPTXLowerAlloca.cpp b/lib/Target/NVPTX/NVPTXLowerAlloca.cpp index 93d0025d8f5..624052e9b98 100644 --- a/lib/Target/NVPTX/NVPTXLowerAlloca.cpp +++ b/lib/Target/NVPTX/NVPTXLowerAlloca.cpp @@ -81,7 +81,7 @@ bool NVPTXLowerAlloca::runOnBasicBlock(BasicBlock &BB) { // Check Load, Store, GEP, and BitCast Uses on alloca and make them // use the converted generic address, in order to expose non-generic // addrspacecast to NVPTXFavorNonGenericAddrSpace. For other types - // of instructions this is unecessary and may introduce redudant + // of instructions this is unnecessary and may introduce redundant // address cast. const auto &AllocaUse = *UI++; auto LI = dyn_cast(AllocaUse.getUser()); diff --git a/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp b/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp index daf44482934..4cd96f0ce4e 100644 --- a/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp +++ b/lib/Target/NVPTX/NVPTXLowerKernelArgs.cpp @@ -138,7 +138,7 @@ INITIALIZE_PASS(NVPTXLowerKernelArgs, "nvptx-lower-kernel-args", // // The above code allocates some space in the stack and copies the incoming // struct from param space to local space. -// Then replace all occurences of %d by %temp. +// Then replace all occurrences of %d by %temp. 
+// Then replace all occurrences of %d by %temp.
// ============================================================================= void NVPTXLowerKernelArgs::handleByValParam(Argument *Arg) { Function *Func = Arg->getParent(); diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index 01a3acb742e..bf3072f783e 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -1028,7 +1028,7 @@ class BitPermutationSelector { BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 && BitGroups[0].V == BitGroups[BitGroups.size()-1].V && BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) { - DEBUG(dbgs() << "\tcombining final bit group with inital one\n"); + DEBUG(dbgs() << "\tcombining final bit group with initial one\n"); BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx; BitGroups.erase(BitGroups.begin()); } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 482d2a5ecaa..5b0161df4bd 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -15308,11 +15308,11 @@ static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask, /// \brief Creates an SDNode for a predicated scalar operation. /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc). -/// The mask is comming as MVT::i8 and it should be truncated +/// The mask is coming as MVT::i8 and it should be truncated /// to MVT::i1 while lowering masking intrinsics. /// The main difference between ScalarMaskingNode and VectorMaskingNode is using -/// "X86select" instead of "vselect". We just can't create the "vselect" node for -/// a scalar instruction. +/// "X86select" instead of "vselect". We just can't create the "vselect" node +/// for a scalar instruction. static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask, SDValue PreservedSrc, const X86Subtarget *Subtarget, @@ -15454,7 +15454,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget SDValue Src0 = Op.getOperand(3); SDValue Mask = Op.getOperand(4); // There are 2 kinds of intrinsics in this group: - // (1) With supress-all-exceptions (sae) or rounding mode- 6 operands + // (1) With suppress-all-exceptions (sae) or rounding mode- 6 operands // (2) With rounding mode and sae - 7 operands. if (Op.getNumOperands() == 6) { SDValue Sae = Op.getOperand(5); @@ -18069,7 +18069,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const { // lowered to just a load without a fence. A mfence flushes the store buffer, // making the optimization clearly correct. // FIXME: it is required if isAtLeastRelease(Order) but it is not clear - // otherwise, we might be able to be more agressive on relaxed idempotent + // otherwise, we might be able to be more aggressive on relaxed idempotent // rmw. In practice, they do not look useful, so we don't try to be // especially clever. if (SynchScope == SingleThread) @@ -25690,7 +25690,7 @@ static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG, } // Check if we can bypass extracting and re-inserting an element of an input - // vector. Essentialy: + // vector. 
+ // vector.
Essentially: // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x) if (V.getOpcode() == ISD::SCALAR_TO_VECTOR && V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp index 734056d2118..d39b49fca86 100644 --- a/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -519,7 +519,7 @@ static Value *tryFactorization(InstCombiner::BuilderTy *Builder, if (isa(Op1)) HasNSW &= Op1->hasNoSignedWrap(); - // We can propogate 'nsw' if we know that + // We can propagate 'nsw' if we know that // %Y = mul nsw i16 %X, C // %Z = add nsw i16 %Y, %X // => diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index 9edbb17e8d1..b2d11e7035f 100644 --- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -482,7 +482,7 @@ namespace { /// A flag indicating whether this optimization pass should run. bool Run; - /// Flags which determine whether each of the interesting runtine functions + /// Flags which determine whether each of the interesting runtime functions /// is in fact used in the current function. unsigned UsedInThisFunction; @@ -1264,7 +1264,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, Arg = GetArgRCIdentityRoot(Inst); TopDownPtrState &S = MyStates.getPtrTopDownState(Arg); NestingDetected |= S.InitTopDown(Class, Inst); - // A retain can be a potential use; procede to the generic checking + // A retain can be a potential use; proceed to the generic checking // code below. break; } diff --git a/lib/Transforms/ObjCARC/PtrState.h b/lib/Transforms/ObjCARC/PtrState.h index e45e1ea96c5..a8c6ff1ed62 100644 --- a/lib/Transforms/ObjCARC/PtrState.h +++ b/lib/Transforms/ObjCARC/PtrState.h @@ -96,7 +96,7 @@ struct RRInfo { }; /// \brief This class summarizes several per-pointer runtime properties which -/// are propogated through the flow graph. +/// are propagated through the flow graph. class PtrState { protected: /// True if the reference count is known to be incremented. @@ -172,7 +172,7 @@ struct BottomUpPtrState : PtrState { bool InitBottomUp(ARCMDKindCache &Cache, Instruction *I); /// Return true if this set of releases can be paired with a release. Modifies - /// state appropriately to reflect that the matching occured if it is + /// state appropriately to reflect that the matching occurred if it is /// successful. /// /// It is assumed that one has already checked that the RCIdentity of the @@ -194,7 +194,7 @@ struct TopDownPtrState : PtrState { /// Return true if this set of retains can be paired with the given /// release. Modifies state appropriately to reflect that the matching - /// occured. + /// occurred. 
+ /// occurred.
bool MatchWithRelease(ARCMDKindCache &Cache, Instruction *Release); void HandlePotentialUse(Instruction *Inst, const Value *Ptr, diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp index f0ac76d84b5..f256af81fc8 100644 --- a/lib/Transforms/Scalar/GVN.cpp +++ b/lib/Transforms/Scalar/GVN.cpp @@ -1771,7 +1771,7 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) { if (Instruction *ReplInst = dyn_cast(Repl)) { // FIXME: If both the original and replacement value are part of the // same control-flow region (meaning that the execution of one - // guarentees the executation of the other), then we can combine the + // guarantees the execution of the other), then we can combine the // noalias scopes here and do better than the general conservative // answer used in combineMetadata(). @@ -2766,7 +2766,7 @@ void GVN::addDeadBlock(BasicBlock *BB) { // R be the target of the dead out-coming edge. // 1) Identify the set of dead blocks implied by the branch's dead outcoming // edge. The result of this step will be {X| X is dominated by R} -// 2) Identify those blocks which haves at least one dead prodecessor. The +// 2) Identify those blocks which haves at least one dead predecessor. The // result of this step will be dominance-frontier(R). // 3) Update the PHIs in DF(R) by replacing the operands corresponding to // dead blocks with "UndefVal" in an hope these PHIs will optimized away. diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp index bcc05c38a04..76848c76a85 100644 --- a/lib/Transforms/Scalar/LICM.cpp +++ b/lib/Transforms/Scalar/LICM.cpp @@ -402,7 +402,7 @@ bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI, } /// Computes loop safety information, checks loop body & header -/// for the possiblity of may throw exception. +/// for the possibility of may throw exception. /// void llvm::computeLICMSafetyInfo(LICMSafetyInfo * SafetyInfo, Loop * CurLoop) { assert(CurLoop != nullptr && "CurLoop cant be null"); @@ -410,7 +410,7 @@ void llvm::computeLICMSafetyInfo(LICMSafetyInfo * SafetyInfo, Loop * CurLoop) { // Setting default safety values. SafetyInfo->MayThrow = false; SafetyInfo->HeaderMayThrow = false; - // Iterate over header and compute dafety info. + // Iterate over header and compute safety info. for (BasicBlock::iterator I = Header->begin(), E = Header->end(); (I != E) && !SafetyInfo->HeaderMayThrow; ++I) SafetyInfo->HeaderMayThrow |= I->mayThrow(); diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp index 9d7e57ffeba..2f3b5e97185 100644 --- a/lib/Transforms/Scalar/LoopInterchange.cpp +++ b/lib/Transforms/Scalar/LoopInterchange.cpp @@ -489,7 +489,7 @@ struct LoopInterchange : public FunctionPass { unsigned selectLoopForInterchange(LoopVector LoopList) { // TODO: Add a better heuristic to select the loop to be interchanged based - // on the dependece matrix. Currently we select the innermost loop. + // on the dependence matrix. Currently we select the innermost loop. return LoopList.size() - 1; } @@ -544,7 +544,7 @@ struct LoopInterchange : public FunctionPass { } unsigned SelecLoopId = selectLoopForInterchange(LoopList); - // Move the selected loop outwards to the best posible position. + // Move the selected loop outwards to the best possible position. 
+ // Move the selected loop outwards to the best possible position.
for (unsigned i = SelecLoopId; i > 0; i--) { bool Interchanged = processLoop(LoopList, i, i - 1, LoopNestExit, DependencyMatrix); @@ -655,7 +655,7 @@ bool LoopInterchangeLegality::tightlyNested(Loop *OuterLoop, Loop *InnerLoop) { DEBUG(dbgs() << "Checking instructions in Loop header and Loop latch \n"); // We do not have any basic block in between now make sure the outer header - // and outer loop latch doesnt contain any unsafe instructions. + // and outer loop latch don't contain any unsafe instructions. if (containsUnsafeInstructionsInHeader(OuterLoopHeader) || containsUnsafeInstructionsInLatch(OuterLoopLatch)) return false; @@ -836,7 +836,7 @@ bool LoopInterchangeLegality::currentLimitations() { else FoundInduction = true; } - // The loop latch ended and we didnt find the induction variable return as + // The loop latch ended and we didn't find the induction variable; return as // current limitation. if (!FoundInduction) return true; @@ -966,7 +966,7 @@ bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId, unsigned OuterLoopId, CharMatrix &DepMatrix) { - // TODO: Add Better Profitibility checks. + // TODO: Add better profitability checks. // e.g // 1) Construct dependency matrix and move the one with no loop carried dep // inside to enable vectorization. @@ -980,7 +980,7 @@ bool LoopInterchangeProfitability::isProfitable(unsigned InnerLoopId, if (Cost < 0) return true; - // It is not profitable as per current cache profitibility model. But check if + // It is not profitable as per current cache profitability model. But check if // we can move this loop outside to improve parallelism. bool ImprovesPar = isProfitabileForVectorization(InnerLoopId, OuterLoopId, DepMatrix); @@ -1045,7 +1045,7 @@ bool LoopInterchangeTransform::transform() { splitInnerLoopLatch(InnerIndexVar); DEBUG(dbgs() << "splitInnerLoopLatch Done\n"); - // Splits the inner loops phi nodes out into a seperate basic block. + // Splits the inner loop's phi nodes out into a separate basic block. splitInnerLoopHeader(); DEBUG(dbgs() << "splitInnerLoopHeader Done\n"); } @@ -1181,8 +1181,8 @@ bool LoopInterchangeTransform::adjustLoopBranches() { if (!OuterLoopPredecessorBI || !InnerLoopLatchPredecessorBI) return false; - BasicBlock *InnerLoopHeaderSucessor = InnerLoopHeader->getUniqueSuccessor(); - if (!InnerLoopHeaderSucessor) + BasicBlock *InnerLoopHeaderSuccessor = InnerLoopHeader->getUniqueSuccessor(); + if (!InnerLoopHeaderSuccessor) return false; // Adjust Loop Preheader and headers @@ -1198,11 +1198,11 @@ bool LoopInterchangeTransform::adjustLoopBranches() { if (OuterLoopHeaderBI->getSuccessor(i) == OuterLoopLatch) OuterLoopHeaderBI->setSuccessor(i, LoopExit); else if (OuterLoopHeaderBI->getSuccessor(i) == InnerLoopPreHeader) - OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSucessor); + OuterLoopHeaderBI->setSuccessor(i, InnerLoopHeaderSuccessor); } // Adjust reduction PHI's now that the incoming block has changed. - updateIncomingBlock(InnerLoopHeaderSucessor, InnerLoopHeader, + updateIncomingBlock(InnerLoopHeaderSuccessor, InnerLoopHeader, OuterLoopHeader); BranchInst::Create(OuterLoopPreHeader, InnerLoopHeaderBI); diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp index 643bf414575..67d5824f0ab 100644 --- a/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -373,7 +373,7 @@ private: /// Try to simplify binary operator I.
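For readers skimming the LoopInterchange hunks above: the payoff the pass's heuristics are chasing is stride-1 access in the innermost loop. A minimal before/after sketch (editorial illustration; the function names are invented):

  // Before: the inner loop walks a row-major array column-wise, so each
  // iteration touches a different cache line (stride of 256 floats).
  void colWise(float A[256][256]) {
    for (int J = 0; J != 256; ++J)
      for (int I = 0; I != 256; ++I)
        A[I][J] += 1.0f;
  }

  // After interchange: the inner loop is stride-1, which improves cache
  // reuse and is also what makes vectorizing the inner loop viable.
  void rowWise(float A[256][256]) {
    for (int I = 0; I != 256; ++I)
      for (int J = 0; J != 256; ++J)
        A[I][J] += 1.0f;
  }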
/// - /// TODO: Probaly it's worth to hoist the code for estimating the + /// TODO: Probably it's worth hoisting the code for estimating the /// simplifications effects to a separate class, since we have a very similar /// code in InlineCost already. bool visitBinaryOperator(BinaryOperator &I) { @@ -851,7 +851,7 @@ unsigned LoopUnroll::selectUnrollCount( unsigned Count = UserCount ? CurrentCount : 0; // If there is no user-specified count, unroll pragmas have the next - // highest precendence. + // highest precedence. if (Count == 0) { if (PragmaCount) { Count = PragmaCount; diff --git a/lib/Transforms/Scalar/PlaceSafepoints.cpp b/lib/Transforms/Scalar/PlaceSafepoints.cpp index d6a48c783b4..79b4e1a83ee 100644 --- a/lib/Transforms/Scalar/PlaceSafepoints.cpp +++ b/lib/Transforms/Scalar/PlaceSafepoints.cpp @@ -27,7 +27,7 @@ // well defined state for inspection by the collector. In the current // implementation, this is done via the insertion of poll sites at method entry // and the backedge of most loops. We try to avoid inserting more polls than -// are neccessary to ensure a finite period between poll sites. This is not +// are necessary to ensure a finite period between poll sites. This is not // because the poll itself is expensive in the generated code; it's not. Polls // do tend to impact the optimizer itself in negative ways; we'd like to avoid // perturbing the optimization of the method as much as we can. @@ -91,7 +91,7 @@ STATISTIC(FiniteExecution, "Number of loops w/o safepoints finite execution"); using namespace llvm; -// Ignore oppurtunities to avoid placing safepoints on backedges, useful for +// Ignore opportunities to avoid placing safepoints on backedges, useful for // validation static cl::opt AllBackedges("spp-all-backedges", cl::Hidden, cl::init(false)); @@ -121,7 +121,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass { std::vector PollLocations; /// True unless we're running spp-no-calls in which case we need to disable - /// the call dependend placement opts. + /// the call-dependent placement opts. bool CallSafepointsEnabled; ScalarEvolution *SE = nullptr; @@ -220,7 +220,7 @@ static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header, // For the moment, we look only for the 'cuts' that consist of a single call // instruction in a block which is dominated by the Header and dominates the // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain - // of such dominating blocks gets substaintially more occurences than just + // of such dominating blocks gets substantially more occurrences than just // checking the Pred and Header blocks themselves. This may be due to the // density of loop exit conditions caused by range and null checks. // TODO: structure this as an analysis pass, cache the result for subloops, @@ -765,7 +765,7 @@ static bool isGCLeafFunction(const CallSite &CS) { // Most LLVM intrinsics are things which can never take a safepoint. // As a result, we don't need to have the stack parsable at the // callsite. This is a highly useful optimization since intrinsic - // calls are fairly prevelent, particularly in debug builds. + // calls are fairly prevalent, particularly in debug builds. return true; } @@ -933,7 +933,7 @@ static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */ Token = Call; - // Put the following gc_result and gc_relocate calls immediately after the + // Put the following gc_result and gc_relocate calls immediately after // the old call (which we're about to delete).
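The PlaceSafepoints header comment above (polls at method entry and on backedges, spaced just often enough to bound the period between polls) amounts to cooperative polling. A toy model in plain C++ (not the pass's actual output; names here are invented):

  #include <atomic>

  std::atomic<bool> CollectorWantsPause{false};

  inline void gcPoll() {
    if (CollectorWantsPause.load(std::memory_order_relaxed)) {
      // Park this thread in a well-defined, parseable state until the
      // collector is done; elided here.
    }
  }

  long sumTo(long N) {
    gcPoll();                 // poll site at method entry
    long S = 0;
    for (long I = 0; I < N; ++I) {
      S += I;
      gcPoll();               // poll site on the loop backedge, bounding
                              // the time between polls even in long loops
    }
    return S;
  }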
assert(ToReplace->getNextNode() && "not a terminator, must have next"); Builder.SetInsertPoint(ToReplace->getNextNode()); @@ -962,7 +962,7 @@ static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */ // We'll insert the gc.result into the normal block BasicBlock *NormalDest = ToReplace->getNormalDest(); // Can not insert gc.result in case of phi nodes preset. - // Should have removed this cases prior to runnning this function + // Should have removed these cases prior to running this function assert(!isa(NormalDest->begin())); Instruction *IP = &*(NormalDest->getFirstInsertionPt()); Builder.SetInsertPoint(IP); diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp index 6f70147c2a0..062c0d5612b 100644 --- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp +++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp @@ -164,7 +164,7 @@ typedef DenseSet StatepointLiveSetTy; typedef DenseMap RematerializedValueMapTy; struct PartiallyConstructedSafepointRecord { - /// The set of values known to be live accross this safepoint + /// The set of values known to be live across this safepoint StatepointLiveSetTy liveset; /// Mapping from live pointers to a base-defining-value @@ -274,7 +274,7 @@ static void analyzeParsePointLiveness( if (PrintLiveSet) { // Note: This output is used by several of the test cases - // The order of elemtns in a set is not stable, put them in a vec and sort + // The order of elements in a set is not stable, put them in a vec and sort // by name SmallVector temp; temp.insert(temp.end(), liveset.begin(), liveset.end()); @@ -509,7 +509,7 @@ static Value *findBaseDefiningValue(Value *I) { return I; // I have absolutely no idea how to implement this part yet. It's not - // neccessarily hard, I just haven't really looked at it yet. + // necessarily hard, I just haven't really looked at it yet. assert(!isa(I) && "Landing Pad is unimplemented"); if (isa(I)) @@ -533,7 +533,7 @@ static Value *findBaseDefiningValue(Value *I) { "Base pointer for a struct is meaningless"); // The last two cases here don't return a base pointer. Instead, they - // return a value which dynamically selects from amoung several base + // return a value which dynamically selects from among several base // derived pointers (each with it's own base potentially). It's the job of // the caller to resolve these. assert((isa(I) || isa(I)) && @@ -717,7 +717,7 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &cache) { // // Note: A simpler form of this would be to add the conflict form of all // PHIs without running the optimistic algorithm. This would be - // analougous to pessimistic data flow and would likely lead to an + // analogous to pessimistic data flow and would likely lead to an // overall worse solution. #ifndef NDEBUG @@ -915,7 +915,7 @@ static Value *findBasePointer(Value *I, DefiningValueMapTy &cache) { assert(base != nullptr && "unknown BDVState!"); } - // In essense this assert states: the only way two + // In essence this assert states: the only way two // values incoming from the same basic block may be // different is by being different bitcasts of the same // value. A cleanup that remains TODO is changing @@ -1043,7 +1043,7 @@ findBasePointers(const StatepointLiveSetTy &live, // If you see this trip and like to live really dangerously, the code should // be correct, just with idioms the verifier can't handle. You can try - // disabling the verifier at your own substaintial risk.
+ // disabling the verifier at your own substantial risk. assert(!isa(base) && "the relocation code needs adjustment to handle the relocation of " "a null pointer constant without causing false positives in the " @@ -1089,7 +1089,7 @@ static void recomputeLiveInValues( Function &F, DominatorTree &DT, Pass *P, ArrayRef toUpdate, MutableArrayRef records) { // TODO-PERF: reuse the original liveness, then simply run the dataflow - // again. The old values are still live and will help it stablize quickly. + // again. The old values are still live and will help it stabilize quickly. GCPtrLivenessData RevisedLivenessData; computeLiveInValues(DT, F, RevisedLivenessData); for (size_t i = 0; i < records.size(); i++) { @@ -1131,7 +1131,7 @@ static int find_index(ArrayRef livevec, Value *val) { return index; } -// Create new attribute set containing only attributes which can be transfered +// Create new attribute set containing only attributes which can be transferred // from original call to the safepoint. static AttributeSet legalizeCallAttributes(AttributeSet AS) { AttributeSet ret; @@ -1263,7 +1263,7 @@ makeStatepointExplicitImpl(const CallSite &CS, /* to replace */ // Currently we will fail on parameter attributes and on certain // function attributes. AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes()); - // In case if we can handle this set of sttributes - set up function attrs + // In case we can handle this set of attributes, set up function attrs // directly on statepoint and return attrs later for gc_result intrinsic. call->setAttributes(new_attrs.getFnAttributes()); return_attributes = new_attrs.getRetAttributes(); @@ -1293,7 +1293,7 @@ makeStatepointExplicitImpl(const CallSite &CS, /* to replace */ // Currently we will fail on parameter attributes and on certain // function attributes. AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes()); - // In case if we can handle this set of sttributes - set up function attrs + // In case we can handle this set of attributes, set up function attrs // directly on statepoint and return attrs later for gc_result intrinsic. invoke->setAttributes(new_attrs.getFnAttributes()); return_attributes = new_attrs.getRetAttributes(); @@ -1571,7 +1571,7 @@ static void relocationViaAlloca( VisitedLiveValues); if (ClobberNonLive) { - // As a debuging aid, pretend that an unrelocated pointer becomes null at + // As a debugging aid, pretend that an unrelocated pointer becomes null at // the gc.statepoint. This will turn some subtle GC problems into // slightly easier to debug SEGVs. Note that on large IR files with // lots of gc.statepoints this is extremely costly both memory and time @@ -1742,10 +1742,10 @@ static void findLiveReferences( /// Remove any vector of pointers from the liveset by scalarizing them over the /// statepoint instruction. Adds the scalarized pieces to the liveset. It -/// would be preferrable to include the vector in the statepoint itself, but +/// would be preferable to include the vector in the statepoint itself, but /// the lowering code currently does not handle that. Extending it would be /// slightly non-trivial since it requires a format change. Given how rare -/// such cases are (for the moment?) scalarizing is an acceptable comprimise. +/// such cases are (for the moment?) scalarizing is an acceptable compromise.
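The ClobberNonLive aid mentioned in the hunk above is easiest to see as deliberate poisoning. A toy model (editorial illustration with invented names, not the pass's alloca-based mechanism):

  #include <cstddef>
  #include <vector>

  // After a (simulated) statepoint, null out every pointer slot that the
  // liveness analysis says is dead. A stale use then faults immediately,
  // turning a subtle relocation bug into an easy-to-spot SEGV.
  void clobberNonLive(std::vector<void *> &Slots,
                      const std::vector<bool> &LiveAcrossStatepoint) {
    for (std::size_t I = 0; I != Slots.size(); ++I)
      if (!LiveAcrossStatepoint[I])
        Slots[I] = nullptr;
  }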
static void splitVectorValues(Instruction *StatepointInst, StatepointLiveSetTy &LiveSet, DenseMap& PointerToBase, @@ -1876,7 +1876,7 @@ static void splitVectorValues(Instruction *StatepointInst, // Helper function for the "rematerializeLiveValues". It walks use chain // starting from the "CurrentValue" until it meets "BaseValue". Only "simple" // values are visited (currently it is GEP's and casts). Returns true if it -// sucessfully reached "BaseValue" and false otherwise. +// successfully reached "BaseValue" and false otherwise. // Fills "ChainToBase" array with all visited values. "BaseValue" is not // recorded. static bool findRematerializableChainToBasePointer( @@ -2128,7 +2128,7 @@ static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P, } assert(records.size() == toUpdate.size()); - // A) Identify all gc pointers which are staticly live at the given call + // A) Identify all gc pointers which are statically live at the given call // site. findLiveReferences(F, DT, P, toUpdate, records); @@ -2205,7 +2205,7 @@ static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P, } // In order to reduce live set of statepoint we might choose to rematerialize - // some values instead of relocating them. This is purelly an optimization and + // some values instead of relocating them. This is purely an optimization and // does not influence correctness. TargetTransformInfo &TTI = P->getAnalysis().getTTI(F); @@ -2450,7 +2450,7 @@ static void computeLiveInValues(BasicBlock::reverse_iterator rbegin, "support for FCA unimplemented"); if (isHandledGCPointerType(V->getType()) && !isa(V)) { // The choice to exclude all things constant here is slightly subtle. - // There are two idependent reasons: + // There are two independent reasons: // - We assume that things which are constant (from LLVM's definition) // do not move at runtime. For example, the address of a global // variable is fixed, even though it's contents may not be. @@ -2588,7 +2588,7 @@ static void computeLiveInValues(DominatorTree &DT, Function &F, } // while( !worklist.empty() ) #ifndef NDEBUG - // Sanity check our ouput against SSA properties. This helps catch any + // Sanity check our output against SSA properties. This helps catch any // missing kills during the above iteration. for (BasicBlock &BB : F) { checkBasicSSA(DT, Data, BB); diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp index 947513a3657..9c69b6c7558 100644 --- a/lib/Transforms/Scalar/SROA.cpp +++ b/lib/Transforms/Scalar/SROA.cpp @@ -270,7 +270,8 @@ public: friend class AllocaSlices; friend class AllocaSlices::partition_iterator; - /// \brief The begining and ending offsets of the alloca for this partition. + /// \brief The beginning and ending offsets of the alloca for this + /// partition. uint64_t BeginOffset, EndOffset; /// \brief The start end end iterators of this partition. @@ -439,7 +440,7 @@ public: // OK, we need to consume new slices. Set the end offset based on the // current slice, and step SJ past it. The beginning offset of the - // parttion is the beginning offset of the next slice unless we have + // partition is the beginning offset of the next slice unless we have // pre-existing split slices that are continuing, in which case we begin // at the prior end offset. P.BeginOffset = P.SplitTails.empty() ? 
P.SI->beginOffset() : P.EndOffset; @@ -493,7 +494,7 @@ public: "End iterators don't match between compared partition iterators!"); // The observed positions of partitions is marked by the P.SI iterator and - // the emptyness of the split slices. The latter is only relevant when + // the emptiness of the split slices. The latter is only relevant when // P.SI == SE, as the end iterator will additionally have an empty split // slices list, but the prior may have the same P.SI and a tail of split // slices. @@ -1934,7 +1935,7 @@ static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V, /// \brief Test whether the given slice use can be promoted to a vector. /// -/// This function is called to test each entry in a partioning which is slated +/// This function is called to test each entry in a partition which is slated /// for a single slice. static bool isVectorPromotionViableForSlice(AllocaSlices::Partition &P, const Slice &S, VectorType *Ty, @@ -2130,7 +2131,7 @@ static bool isIntegerWideningViableForSlice(const Slice &S, uint64_t RelEnd = S.endOffset() - AllocBeginOffset; // We can't reasonably handle cases where the load or store extends past - // the end of the aloca's type and into its padding. + // the end of the alloca's type and into its padding. if (RelEnd > Size) return false; @@ -3711,7 +3712,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { return true; }), Stores.end()); - // Now we have to go *back* through all te stores, because a later store may + // Now we have to go *back* through all the stores, because a later store may // have caused an earlier store's load to become unsplittable and if it is // unsplittable for the later store, then we can't rely on it being split in // the earlier store either. @@ -3972,7 +3973,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) { // Mark the original store as dead now that we've split it up and kill its // slice. Note that we leave the original load in place unless this store - // was its ownly use. It may in turn be split up if it is an alloca load + // was its only use. It may in turn be split up if it is an alloca load // for some other alloca, but it may be a normal load. This may introduce // redundant loads, but where those can be merged the rest of the optimizer // should handle the merging, and this uncovers SSA splits which is more @@ -4230,7 +4231,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) { std::max(NumPartitions, MaxPartitionsPerAlloca); // Migrate debug information from the old alloca to the new alloca(s) - // and the individial partitions. + // and the individual partitions. if (DbgDeclareInst *DbgDecl = FindAllocaDbgDeclare(&AI)) { auto *Var = DbgDecl->getVariable(); auto *Expr = DbgDecl->getExpression(); diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp index cf11119086c..52d13b3a94a 100644 --- a/lib/Transforms/Utils/CloneFunction.cpp +++ b/lib/Transforms/Utils/CloneFunction.cpp @@ -484,7 +484,7 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc, } #ifndef NDEBUG - // If the cloning starts at the begining of the function, verify that + // If the cloning starts at the beginning of the function, verify that // the function arguments are mapped. 
if (!StartingInst) for (Function::const_arg_iterator II = OldFunc->arg_begin(), diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp index 4acd988691d..3b447098af1 100644 --- a/lib/Transforms/Utils/LowerSwitch.cpp +++ b/lib/Transforms/Utils/LowerSwitch.cpp @@ -163,7 +163,7 @@ static void fixPhis(BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB, I != IE; ++I) { PHINode *PN = cast(I); - // Only update the first occurence. + // Only update the first occurrence. unsigned Idx = 0, E = PN->getNumIncomingValues(); unsigned LocalNumMergedCases = NumMergedCases; for (; Idx != E; ++Idx) { @@ -173,7 +173,7 @@ static void fixPhis(BasicBlock *SuccBB, BasicBlock *OrigBB, BasicBlock *NewBB, } } - // Remove additional occurences coming from condensed cases and keep the + // Remove additional occurrences coming from condensed cases and keep the // number of incoming values equal to the number of branches to SuccBB. SmallVector Indices; for (++Idx; LocalNumMergedCases > 0 && Idx < E; ++Idx) @@ -424,7 +424,7 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) { std::vector UnreachableRanges; if (isa(Default->getFirstNonPHIOrDbg())) { - // Make the bounds tightly fitted around the case value range, becase we + // Make the bounds tightly fitted around the case value range, because we // know that the value passed to the switch must be exactly one of the case // values. assert(!Cases.empty()); diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp index fa0459f62c9..10554845e5a 100644 --- a/lib/Transforms/Utils/SimplifyIndVar.cpp +++ b/lib/Transforms/Utils/SimplifyIndVar.cpp @@ -63,7 +63,7 @@ namespace { /// Iteratively perform simplification on a worklist of users of the /// specified induction variable. This is the top-level driver that applies - /// all simplicitions to users of an IV. + /// all simplifications to users of an IV. void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr); Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand); @@ -184,7 +184,7 @@ void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp, Value *IVOperand) { InvariantPredicate, InvariantLHS, InvariantRHS)) { - // Rewrite the comparision to a loop invariant comparision if it can be done + // Rewrite the comparison to a loop invariant comparison if it can be done // cheaply, where cheaply means "we don't need to emit any new // instructions". @@ -482,8 +482,8 @@ static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) { /// This algorithm does not require IVUsers analysis. Instead, it simplifies /// instructions in-place during analysis. Rather than rewriting induction /// variables bottom-up from their users, it transforms a chain of IVUsers -/// top-down, updating the IR only when it encouters a clear optimization -/// opportunitiy. +/// top-down, updating the IR only when it encounters a clear optimization +/// opportunity. /// /// Once DisableIVRewrite is default, LSR will be the only client of IVUsers. 
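The bound-tightening noted in the LowerSwitch hunk above follows from a simple fact: with an unreachable default, the switch operand must equal one of the case values, so the lowered range checks can clamp to [min case, max case] instead of the full integer range. A sketch, assuming nothing beyond the standard library (illustrative only):

  #include <algorithm>
  #include <cassert>
  #include <utility>
  #include <vector>

  // Tight bounds for the lowered comparison tree when the default block
  // is known unreachable: the value is guaranteed to be one of the cases.
  std::pair<long, long> tightBounds(const std::vector<long> &CaseValues) {
    assert(!CaseValues.empty() && "unreachable default requires cases");
    auto MM = std::minmax_element(CaseValues.begin(), CaseValues.end());
    return {*MM.first, *MM.second};
  }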
/// diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index b215a256af0..61773cd2013 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -162,7 +162,7 @@ static bool canCombineAsAltInst(unsigned Op) { return false; } -/// \returns ShuffleVector instruction if intructions in \p VL have +/// \returns ShuffleVector instruction if instructions in \p VL have /// alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence. /// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...) static unsigned isAltInst(ArrayRef VL) { @@ -393,7 +393,7 @@ public: /// \brief Perform LICM and CSE on the newly generated gather sequences. void optimizeGatherSequence(); - /// \returns true if it is benefitial to reverse the vector order. + /// \returns true if it is beneficial to reverse the vector order. bool shouldReorder() const { return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder; } @@ -441,7 +441,7 @@ private: /// \returns a vector from a collection of scalars in \p VL. Value *Gather(ArrayRef VL, VectorType *Ty); - /// \returns whether the VectorizableTree is fully vectoriable and will + /// \returns whether the VectorizableTree is fully vectorizable and will /// be beneficial even the tree height is tiny. bool isFullyVectorizableTinyTree(); @@ -2898,8 +2898,8 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, } } else { // I'm not sure if this can ever happen. But we need to be safe. - // This lets the instruction/bundle never be scheduled and eventally - // disable vectorization. + // This lets the instruction/bundle never be scheduled and + // eventually disable vectorization. BundleMember->Dependencies++; BundleMember->incrementUnscheduledDeps(1); } @@ -3005,7 +3005,7 @@ void BoUpSLP::scheduleBlock(BlockScheduling *BS) { }; std::set ReadyInsts; - // Ensure that all depencency data is updated and fill the ready-list with + // Ensure that all dependency data is updated and fill the ready-list with // initial instructions. int Idx = 0; int NumToSchedule = 0; @@ -3732,7 +3732,7 @@ public: private: - /// \brief Calcuate the cost of a reduction. + /// \brief Calculate the cost of a reduction. int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) { Type *ScalarTy = FirstReducedVal->getType(); Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); -- 2.34.1
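Postscript on the SLPVectorizer hunks: the "alternate fadd,fsub" sequence the comment describes corresponds to scalar source like the following, which an SLP vectorizer can lower to one vector add, one vector sub, and a lane blend (editorial illustration; the function name is invented):

  // Adjacent lanes alternate between + and -, the shape isAltInst() looks
  // for. A vectorizer can compute a+b and a-b on whole vectors and then
  // shuffle the even/odd lanes together (the classic "addsub" pattern).
  void addSub4(float *R, const float *A, const float *B) {
    R[0] = A[0] + B[0];
    R[1] = A[1] - B[1];
    R[2] = A[2] + B[2];
    R[3] = A[3] - B[3];
  }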