//===-- PPCCTRLoops.cpp - Identify and generate CTR loops ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass identifies loops where we can generate the PPC branch instructions
// that decrement and test the count register (CTR) (bdnz and friends).
//
// The pattern that defines the induction variable can change depending on
// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
// normalizes induction variables, and the Loop Strength Reduction pass
// run by 'llc' may also make changes to the induction variable.
//
// Criteria for CTR loops:
//  - Countable loops (w/ ind. var for a trip count)
//  - Try inner-most loops first
//  - No nested CTR loops.
//  - No function calls in loops.
//
//===----------------------------------------------------------------------===//
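//
// As an illustrative sketch, a simple countable loop such as
//
//   for (int i = 0; i != n; ++i)
//     sum += a[i];
//
// is handled by moving the trip count into CTR in the loop preheader via the
// llvm.ppc.mtctr intrinsic and rewriting the counted exit branch to test
// llvm.ppc.is.decremented.ctr.nonzero, which the backend then lowers to a
// decrement-and-branch (bdnz) instruction.
//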

#include "llvm/Transforms/Scalar.h"
#include "PPC.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define DEBUG_TYPE "ctrloops"

static cl::opt<int> CTRLoopLimit("ppc-max-ctrloop", cl::Hidden, cl::init(-1));

STATISTIC(NumCTRLoops, "Number of loops converted to CTR loops");

namespace llvm {
  void initializePPCCTRLoopsPass(PassRegistry&);
  void initializePPCCTRLoopsVerifyPass(PassRegistry&);
}

namespace {
  struct PPCCTRLoops : public FunctionPass {
  public:
    static char ID;
    static int Counter;

    PPCCTRLoops() : FunctionPass(ID), TM(nullptr) {
      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
    }
    PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<LoopInfo>();
      AU.addPreserved<LoopInfo>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<ScalarEvolution>();
    }

  private:
    bool mightUseCTR(const Triple &TT, BasicBlock *BB);
    bool convertToCTRLoop(Loop *L);

    PPCTargetMachine *TM;
    LoopInfo *LI;
    ScalarEvolution *SE;
    DominatorTree *DT;
    const DataLayout *DL;
    const TargetLibraryInfo *LibInfo;
  };

  char PPCCTRLoops::ID = 0;
  int PPCCTRLoops::Counter = 0;

  struct PPCCTRLoopsVerify : public MachineFunctionPass {
  public:
    static char ID;

    PPCCTRLoopsVerify() : MachineFunctionPass(ID) {
      initializePPCCTRLoopsVerifyPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

  private:
    MachineDominatorTree *MDT;
  };

  char PPCCTRLoopsVerify::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                    false, false)

FunctionPass *llvm::createPPCCTRLoops(PPCTargetMachine &TM) {
  return new PPCCTRLoops(TM);
}

INITIALIZE_PASS_BEGIN(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                      "PowerPC CTR Loops Verify", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                    "PowerPC CTR Loops Verify", false, false)

FunctionPass *llvm::createPPCCTRLoopsVerify() {
  return new PPCCTRLoopsVerify();
}

bool PPCCTRLoops::runOnFunction(Function &F) {
  LI = &getAnalysis<LoopInfo>();
  SE = &getAnalysis<ScalarEvolution>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  LibInfo = TLIP ? &TLIP->getTLI() : nullptr;

  bool MadeChange = false;

  for (LoopInfo::iterator I = LI->begin(), E = LI->end();
       I != E; ++I) {
    Loop *L = *I;
    if (!L->getParentLoop())
      MadeChange |= convertToCTRLoop(L);
  }

  return MadeChange;
}
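
// Returns true if Ty is an integer type wider than the native GPR width
// (32 bits when Is32Bit is set, 64 bits otherwise); operations on such types
// may be expanded into runtime library calls.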
static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
    return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

  return false;
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
static bool memAddrUsesCTR(const PPCTargetMachine *TM,
                           const llvm::Value *MemAddr) {
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV)
    return false;
  if (!GV->isThreadLocal())
    return false;
  if (!TM)
    return true;
  TLSModel::Model Model = TM->getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}
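
// Conservatively determine whether any instruction in BB might be lowered in a
// way that uses or clobbers the CTR register (function calls, runtime library
// expansions, indirect branches, large switch jump tables, or CTR-clobbering
// inline asm), which would make the containing loop unsuitable for CTR.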
bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        // Inline ASM is okay, unless it clobbers the ctr register.
        InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
        for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
          InlineAsm::ConstraintInfo &C = CIV[i];
          if (C.Type != InlineAsm::isInput)
            for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
              if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
                return true;
        }

        continue;
      }
      if (!TM)
        return true;
      const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                         !defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
 // let's return it to _setjmp state
# pragma pop_macro("setjmp")
# undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
          case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
          case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
          case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
          case Intrinsic::rint: Opcode = ISD::FRINT; break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round: Opcode = ISD::FROUND; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc::Func Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc::copysign:
          case LibFunc::copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc::copysignl:
            return true;
          case LibFunc::fabs:
          case LibFunc::fabsf:
          case LibFunc::fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc::sqrt:
          case LibFunc::sqrtf:
          case LibFunc::sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc::floor:
          case LibFunc::floorf:
          case LibFunc::floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc::nearbyint:
          case LibFunc::nearbyintf:
          case LibFunc::nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc::ceil:
          case LibFunc::ceilf:
          case LibFunc::ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc::rint:
          case LibFunc::rintf:
          case LibFunc::rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc::round:
          case LibFunc::roundf:
          case LibFunc::roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc::trunc:
          case LibFunc::truncf:
          case LibFunc::truncl:
            Opcode = ISD::FTRUNC; break;
          }

          MVT VTy =
            TLI->getSimpleValueType(CI->getArgOperand(0)->getType(), true);
          if (VTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, VTy))
            continue;
          else if (VTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, VTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(TT.isArch32Bit(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(TT.isArch32Bit(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(TT.isArch32Bit(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (TT.isArch32Bit() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (!TM)
        return true;
      const TargetLowering *TLI = TM->getSubtargetImpl()->getTargetLowering();

      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(TM, Operand))
        return true;
  }

  return false;
}
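
// Try to convert L (innermost loops first) into a CTR-based loop: find an
// exiting block with a computable, loop-invariant exit count that runs on
// every iteration, move the trip count into CTR in the preheader, and rewrite
// that block's conditional branch to test the decremented counter.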
bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
  bool MadeChange = false;

  Triple TT = Triple(L->getHeader()->getParent()->getParent()->
                     getTargetTriple());
  if (!TT.isArch32Bit() && !TT.isArch64Bit())
    return MadeChange; // Unknown arch. type.

  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
    MadeChange |= convertToCTRLoop(*I);
  }

  // If a nested loop has been converted, then we can't convert this loop.
  if (MadeChange)
    return MadeChange;

  // Stop trying after reaching the limit (if any).
  int Limit = CTRLoopLimit;
  if (Limit >= 0) {
    if (Counter >= CTRLoopLimit)
      return MadeChange;
    Counter++;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(TT, *I))
      return MadeChange;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  BasicBlock *CountedExitBlock = nullptr;
  const SCEV *ExitCount = nullptr;
  BranchInst *CountedExitBranch = nullptr;
  for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
       IE = ExitingBlocks.end(); I != IE; ++I) {
    const SCEV *EC = SE->getExitCount(L, *I);
    DEBUG(dbgs() << "Exit Count for " << *L << " from block " <<
                    (*I)->getName() << ": " << *EC << "\n");
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE->isLoopInvariant(EC, L))
      continue;

    if (SE->getTypeSizeInBits(EC->getType()) > (TT.isArch64Bit() ? 64 : 32))
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (pred_iterator PI = pred_begin(L->getHeader()),
         PIE = pred_end(L->getHeader()); PI != PIE; ++PI) {
      if (!L->contains(*PI))
        continue;

      if (!DT->dominates(*I, *PI)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = (*I)->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      CountedExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    CountedExitBlock = *I;
    ExitCount = EC;
    break;
  }

  if (!CountedExitBlock)
    return MadeChange;

  BasicBlock *Preheader = L->getLoopPreheader();

  // If we don't have a preheader, then insert one. If we already have a
  // preheader, then we can use it (except if the preheader contains a use of
  // the CTR register because some such uses might be reordered by the
  // selection DAG after the mtctr instruction).
  if (!Preheader || mightUseCTR(TT, Preheader))
    Preheader = InsertPreheaderForLoop(L, this);
  if (!Preheader)
    return MadeChange;

  DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName() << "\n");

  // Insert the count into the preheader and replace the condition used by the
  // selected branch.
  MadeChange = true;

  SCEVExpander SCEVE(*SE, "loopcnt");
  LLVMContext &C = SE->getContext();
  Type *CountType = TT.isArch64Bit() ? Type::getInt64Ty(C) :
                                       Type::getInt32Ty(C);
  if (!ExitCount->getType()->isPointerTy() &&
      ExitCount->getType() != CountType)
    ExitCount = SE->getZeroExtendExpr(ExitCount, CountType);
  ExitCount = SE->getAddExpr(ExitCount,
                             SE->getConstant(CountType, 1));
  Value *ECValue = SCEVE.expandCodeFor(ExitCount, CountType,
                                       Preheader->getTerminator());

  IRBuilder<> CountBuilder(Preheader->getTerminator());
  Module *M = Preheader->getParent()->getParent();
  Value *MTCTRFunc = Intrinsic::getDeclaration(M, Intrinsic::ppc_mtctr,
                                               CountType);
  CountBuilder.CreateCall(MTCTRFunc, ECValue);
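
  // Illustrative sketch of the preheader's new tail (value names invented;
  // the count expression itself comes from the SCEV expansion above):
  //   call void @llvm.ppc.mtctr.i64(i64 %count)   ; count = exit count + 1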

  IRBuilder<> CondBuilder(CountedExitBranch);
  Value *DecFunc =
    Intrinsic::getDeclaration(M, Intrinsic::ppc_is_decremented_ctr_nonzero);
  Value *NewCond = CondBuilder.CreateCall(DecFunc);
  Value *OldCond = CountedExitBranch->getCondition();
  CountedExitBranch->setCondition(NewCond);

  // The false branch must exit the loop.
  if (!L->contains(CountedExitBranch->getSuccessor(0)))
    CountedExitBranch->swapSuccessors();

  // The old condition may be dead now, and may have even created a dead PHI
  // (the original induction variable).
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);
  DeleteDeadPHIs(CountedExitBlock);

  ++NumCTRLoops;
  return MadeChange;
}
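
// Returns true if MI writes CTR/CTR8, either through an explicit register
// def operand or through a register-mask operand (e.g. a call).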
static bool clobbersCTR(const MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      if (MO.isDef() && (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8))
        return true;
    } else if (MO.isRegMask()) {
      if (MO.clobbersPhysReg(PPC::CTR) || MO.clobbersPhysReg(PPC::CTR8))
        return true;
    }
  }

  return false;
}
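
// Starting from the CTR-based branch at I in MBB, walk backwards (following
// predecessor blocks as needed) and check that a MTCTRloop/MTCTR8loop
// definition of CTR is reached before any instruction that clobbers CTR.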
static bool verifyCTRBranch(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator BI = I;
  SmallSet<MachineBasicBlock *, 16> Visited;
  SmallVector<MachineBasicBlock *, 8> Preds;
  bool CheckPreds;

  if (I == MBB->begin()) {
    Visited.insert(MBB);
    goto queue_preds;
  } else
    --I;

check_block:
  Visited.insert(MBB);
  if (I == MBB->end())
    goto queue_preds;

  CheckPreds = true;
  for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
    unsigned Opc = I->getOpcode();
    if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
      CheckPreds = false;
      break;
    }

    if (I != BI && clobbersCTR(I)) {
      DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
                      MBB->getFullName() << ") instruction " << *I <<
                      " clobbers CTR, invalidating " << "BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    if (I == IE)
      break;
  }

  if (!CheckPreds && Preds.empty())
    return true;

  if (CheckPreds) {
queue_preds:
    if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
      DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PIE = MBB->pred_end(); PI != PIE; ++PI)
      Preds.push_back(*PI);
  }

  do {
    MBB = Preds.pop_back_val();
    if (!Visited.count(MBB)) {
      I = MBB->getLastNonDebugInstr();
      goto check_block;
    }
  } while (!Preds.empty());

  return true;
}

bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
  MDT = &getAnalysis<MachineDominatorTree>();

  // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
  // any other instructions that might clobber the ctr register.
  for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
       I != IE; ++I) {
    MachineBasicBlock *MBB = I;
    if (!MDT->isReachableFromEntry(MBB))
      continue;

    for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
         MIIE = MBB->end(); MII != MIIE; ++MII) {
      unsigned Opc = MII->getOpcode();
      if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
          Opc == PPC::BDZ8 || Opc == PPC::BDZ)
        if (!verifyCTRBranch(MBB, MII))
          llvm_unreachable("Invalid PPC CTR loop!");
    }
  }

  return false;
}