From a8adbcb9de5ba60a6b5677c64cd64491806f5881 Mon Sep 17 00:00:00 2001
From: Eric Christopher
Date: Thu, 14 May 2015 23:07:13 +0000
Subject: [PATCH] 80-col fixups.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@237403 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Target/TargetLowering.h | 36 ++++++++++++++++------------
 1 file changed, 21 insertions(+), 15 deletions(-)

diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 50b34ba8365..2f9d6adadb5 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -126,8 +126,8 @@ public:
   /// Enum that specifies what a AtomicRMWInst is expanded to, if at all. Exists
   /// because different targets have different levels of support for these
-  /// atomic RMW instructions, and also have different options w.r.t. what they should
-  /// expand to.
+  /// atomic RMW instructions, and also have different options w.r.t. what they
+  /// should expand to.
   enum class AtomicRMWExpansionKind {
     None,    // Don't expand the instruction.
     LLSC,    // Expand the instruction into loadlinked/storeconditional; used
@@ -258,11 +258,12 @@ public:
   /// isLoadBitCastBeneficial() - Return true if the following transform
   /// is beneficial.
   /// fold (conv (load x)) -> (load (conv*)x)
-  /// On architectures that don't natively support some vector loads efficiently,
-  /// casting the load to a smaller vector of larger types and loading
-  /// is more efficient, however, this can be undone by optimizations in
+  /// On architectures that don't natively support some vector loads
+  /// efficiently, casting the load to a smaller vector of larger types and
+  /// loading is more efficient, however, this can be undone by optimizations in
   /// dag combiner.
-  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
+  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+                                       EVT /* Bitcast */) const {
     return true;
   }

@@ -270,7 +271,7 @@ public:
   virtual bool isCheapToSpeculateCttz() const {
     return false;
   }
-  
+
   /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
   virtual bool isCheapToSpeculateCtlz() const {
     return false;
@@ -573,7 +574,8 @@ public:
   /// Return how this load with extension should be treated: either it is legal,
   /// needs to be promoted to a larger size, needs to be expanded to some other
   /// code sequence, or the target has a custom expander for it.
-  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+                                  EVT MemVT) const {
     if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
     unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
     unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
@@ -1053,8 +1055,9 @@ public:
   /// seq_cst. But if they are lowered to monotonic accesses, no amount of
   /// IR-level fences can prevent it.
   /// @{
-  virtual Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                        bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+                                        AtomicOrdering Ord, bool IsStore,
+                                        bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
@@ -1064,8 +1067,9 @@ public:
     return nullptr;
   }

-  virtual Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                         bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+                                         AtomicOrdering Ord, bool IsStore,
+                                         bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
@@ -1110,7 +1114,8 @@ public:
   /// it succeeds, and nullptr otherwise.
   /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
   /// another round of expansion.
-  virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+  virtual LoadInst *
+  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
     return nullptr;
   }
@@ -1962,7 +1967,8 @@ protected:
   /// Replace/modify any TargetFrameIndex operands with a targte-dependent
   /// sequence of memory operands that is recognized by PrologEpilogInserter.
-  MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
+  MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+                                    MachineBasicBlock *MBB) const;
 };

 /// This class defines information used to lower LLVM code to legal SelectionDAG
@@ -2687,7 +2693,7 @@ public:
   /// Hooks for building estimates in place of slower divisions and square
   /// roots.
-  
+
   /// Return a reciprocal square root estimate value for the input operand.
   /// The RefinementSteps output is the number of Newton-Raphson refinement
   /// iterations required to generate a sufficient (though not necessarily
-- 
2.34.1
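
Note (not part of the patch): for readers unfamiliar with the fence hooks whose
declarations are re-wrapped above, here is a minimal sketch of how a backend
might override emitLeadingFence, assuming LLVM-3.7-era APIs (the unscoped
AtomicOrdering enum and IRBuilder<>::CreateFence). "WidgetTargetLowering" is a
hypothetical target; only the hook's signature comes from the header being
patched.

// Hypothetical override of the emitLeadingFence hook shown in the diff.
// Assumes WidgetTargetLowering derives from TargetLowering and that
// getInsertFencesForAtomic() has been enabled for this target.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

Instruction *WidgetTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                    AtomicOrdering Ord,
                                                    bool IsStore,
                                                    bool IsLoad) const {
  // Fence before stores with release semantics; on this (hypothetical)
  // target, loads need no leading fence, so return nullptr for them.
  if (IsStore && (Ord == Release || Ord == AcquireRelease ||
                  Ord == SequentiallyConsistent))
    return Builder.CreateFence(Ord);
  return nullptr;
}

Returning nullptr tells the atomic-expansion pass that no fence instruction
was emitted for this access, mirroring the default implementation's behavior
when getInsertFencesForAtomic() is false.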