namespace {
-class LoopIdiomRecognize;
-
-/// This class is to recoginize idioms of population-count conducted in
-/// a noncountable loop. Currently it only recognizes this pattern:
-/// \code
-/// while(x) {cnt++; ...; x &= x - 1; ...}
-/// \endcode
-class NclPopcountRecognize {
- LoopIdiomRecognize &LIR;
- Loop *CurLoop;
- BasicBlock *PreCondBB;
-
- typedef IRBuilder<> IRBuilderTy;
-
-public:
- explicit NclPopcountRecognize(LoopIdiomRecognize &TheLIR);
- bool recognize();
-
-private:
- /// Take a glimpse of the loop to see if we need to go ahead recoginizing
- /// the idiom.
- bool preliminaryScreen();
-
- /// Check if the given conditional branch is based on the comparison
- /// between a variable and zero, and if the variable is non-zero, the
- /// control yields to the loop entry. If the branch matches the behavior,
- /// the variable involved in the comparion is returned. This function will
- /// be called to see if the precondition and postcondition of the loop
- /// are in desirable form.
- Value *matchCondition(BranchInst *Br, BasicBlock *NonZeroTarget) const;
-
- /// Return true iff the idiom is detected in the loop. and 1) \p CntInst
- /// is set to the instruction counting the population bit. 2) \p CntPhi
- /// is set to the corresponding phi node. 3) \p Var is set to the value
- /// whose population bits are being counted.
- bool detectIdiom(Instruction *&CntInst, PHINode *&CntPhi, Value *&Var) const;
-
- /// Insert ctpop intrinsic function and some obviously dead instructions.
- void transform(Instruction *CntInst, PHINode *CntPhi, Value *Var);
-
- /// Create llvm.ctpop.* intrinsic function.
- CallInst *createPopcntIntrinsic(IRBuilderTy &IRB, Value *Val, DebugLoc DL);
-};
-
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
DominatorTree *DT;
bool runOnNoncountableLoop();
+ bool recognizePopcount();
+ void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
+ PHINode *CntPhi, Value *Var);
+
/// @}
};
//===----------------------------------------------------------------------===//
//
-// Implementation of NclPopcountRecognize
+// Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//
-NclPopcountRecognize::NclPopcountRecognize(LoopIdiomRecognize &TheLIR)
- : LIR(TheLIR), CurLoop(TheLIR.getLoop()), PreCondBB(nullptr) {}
-
-bool NclPopcountRecognize::preliminaryScreen() {
- const TargetTransformInfo *TTI = LIR.getTargetTransformInfo();
- if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
- return false;
-
- // Counting population are usually conducted by few arithmetic instructions.
- // Such instructions can be easilly "absorbed" by vacant slots in a
- // non-compact loop. Therefore, recognizing popcount idiom only makes sense
- // in a compact loop.
-
- // Give up if the loop has multiple blocks or multiple backedges.
- if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
+bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (skipOptnoneFunction(L))
return false;
- BasicBlock *LoopBody = *(CurLoop->block_begin());
- if (LoopBody->size() >= 20) {
- // The loop is too big, bail out.
- return false;
- }
+ CurLoop = L;
- // It should have a preheader containing nothing but an unconditional branch.
- BasicBlock *PH = CurLoop->getLoopPreheader();
- if (!PH)
- return false;
- if (&PH->front() != PH->getTerminator())
- return false;
- auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
- if (!EntryBI || EntryBI->isConditional())
+ // If the loop could not be converted to canonical form, it must have an
+ // indirectbr in it, just give up.
+ if (!L->getLoopPreheader())
return false;
- // It should have a precondition block where the generated popcount instrinsic
- // function can be inserted.
- PreCondBB = PH->getSinglePredecessor();
- if (!PreCondBB)
- return false;
- auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
- if (!PreCondBI || PreCondBI->isUnconditional())
+ // Disable loop idiom recognition if the function's name is a common idiom.
+ StringRef Name = L->getHeader()->getParent()->getName();
+ if (Name == "memset" || Name == "memcpy")
return false;
- return true;
+ SE = &getAnalysis<ScalarEvolution>();
+ if (SE->hasLoopInvariantBackedgeTakenCount(L))
+ return runOnCountableLoop();
+ return runOnNoncountableLoop();
}
-Value *NclPopcountRecognize::matchCondition(BranchInst *Br,
- BasicBlock *LoopEntry) const {
- if (!Br || !Br->isConditional())
- return nullptr;
+bool LoopIdiomRecognize::runOnCountableLoop() {
+ const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
+ assert(!isa<SCEVCouldNotCompute>(BECount) &&
+ "runOnCountableLoop() called on a loop without a predictable"
+ "backedge-taken count");
- ICmpInst *Cond = dyn_cast<ICmpInst>(Br->getCondition());
- if (!Cond)
- return nullptr;
+ // If this loop executes exactly one time, then it should be peeled, not
+ // optimized by this pass.
+ if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
+ if (BECst->getValue()->getValue() == 0)
+ return false;
- ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
- if (!CmpZero || !CmpZero->isZero())
- return nullptr;
+ // set DT
+ (void)getDominatorTree();
- ICmpInst::Predicate Pred = Cond->getPredicate();
- if ((Pred == ICmpInst::ICMP_NE && Br->getSuccessor(0) == LoopEntry) ||
- (Pred == ICmpInst::ICMP_EQ && Br->getSuccessor(1) == LoopEntry))
- return Cond->getOperand(0);
+ LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- return nullptr;
-}
+ // set TLI
+ (void)getTargetLibraryInfo();
-bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst, PHINode *&CntPhi,
- Value *&Var) const {
- // Following code tries to detect this idiom:
- //
- // if (x0 != 0)
- // goto loop-exit // the precondition of the loop
- // cnt0 = init-val;
- // do {
- // x1 = phi (x0, x2);
- // cnt1 = phi(cnt0, cnt2);
- //
- // cnt2 = cnt1 + 1;
- // ...
- // x2 = x1 & (x1 - 1);
- // ...
- // } while(x != 0);
- //
- // loop-exit:
- //
+ SmallVector<BasicBlock *, 8> ExitBlocks;
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
- // step 1: Check to see if the look-back branch match this pattern:
- // "if (a!=0) goto loop-entry".
- BasicBlock *LoopEntry;
- Instruction *DefX2, *CountInst;
- Value *VarX1, *VarX0;
- PHINode *PhiX, *CountPhi;
+ DEBUG(dbgs() << "loop-idiom Scanning: F["
+ << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
+ << CurLoop->getHeader()->getName() << "\n");
- DefX2 = CountInst = nullptr;
- VarX1 = VarX0 = nullptr;
- PhiX = CountPhi = nullptr;
- LoopEntry = *(CurLoop->block_begin());
+ bool MadeChange = false;
+ // Scan all the blocks in the loop that are not in subloops.
+ for (auto *BB : CurLoop->getBlocks()) {
+ // Ignore blocks in subloops.
+ if (LI.getLoopFor(BB) != CurLoop)
+ continue;
- // step 1: Check if the loop-back branch is in desirable form.
- {
- if (Value *T = matchCondition(
- dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
- DefX2 = dyn_cast<Instruction>(T);
- else
- return false;
+ MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
}
+ return MadeChange;
+}
- // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
- {
- if (!DefX2 || DefX2->getOpcode() != Instruction::And)
+/// runOnLoopBlock - Process the specified block, which lives in a counted loop
+/// with the specified backedge count. This block is known to be in the current
+/// loop and not in any subloops.
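+///
+/// For example, a store in a block that is only conditionally executed inside
+/// the loop body generally does not dominate the loop exits, so the dominance
+/// check below rejects the whole block and its stores are left untouched.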
+bool LoopIdiomRecognize::runOnLoopBlock(
+ BasicBlock *BB, const SCEV *BECount,
+ SmallVectorImpl<BasicBlock *> &ExitBlocks) {
+ // We can only promote stores in this block if they are unconditionally
+ // executed in the loop. For a block to be unconditionally executed, it has
+ // to dominate all the exit blocks of the loop. Verify this now.
+ for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
+ if (!DT->dominates(BB, ExitBlocks[i]))
return false;
- BinaryOperator *SubOneOp;
+ bool MadeChange = false;
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
+ Instruction *Inst = I++;
+ // Look for store instructions, which may be optimized to memset/memcpy.
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ WeakVH InstPtr(I);
+ if (!processLoopStore(SI, BECount))
+ continue;
+ MadeChange = true;
- if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
- VarX1 = DefX2->getOperand(1);
- else {
- VarX1 = DefX2->getOperand(0);
- SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
+ // If processing the store invalidated our iterator, start over from the
+ // top of the block.
+ if (!InstPtr)
+ I = BB->begin();
+ continue;
}
- if (!SubOneOp)
- return false;
- Instruction *SubInst = cast<Instruction>(SubOneOp);
- ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
- if (!Dec ||
- !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
- (SubInst->getOpcode() == Instruction::Add &&
- Dec->isAllOnesValue()))) {
- return false;
- }
- }
+ // Look for memset instructions, which may be optimized to a larger memset.
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
+ WeakVH InstPtr(I);
+ if (!processLoopMemSet(MSI, BECount))
+ continue;
+ MadeChange = true;
- // step 3: Check the recurrence of variable X
- {
- PhiX = dyn_cast<PHINode>(VarX1);
- if (!PhiX ||
- (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) {
- return false;
+ // If processing the memset invalidated our iterator, start over from the
+ // top of the block.
+ if (!InstPtr)
+ I = BB->begin();
+ continue;
}
}
- // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1
- {
- CountInst = nullptr;
- for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI(),
- IterE = LoopEntry->end();
- Iter != IterE; Iter++) {
- Instruction *Inst = Iter;
- if (Inst->getOpcode() != Instruction::Add)
- continue;
+ return MadeChange;
+}
- ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
- if (!Inc || !Inc->isOne())
- continue;
+/// processLoopStore - See if this store can be promoted to a memset or memcpy.
+bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
+ if (!SI->isSimple())
+ return false;
- PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0));
- if (!Phi || Phi->getParent() != LoopEntry)
- continue;
+ Value *StoredVal = SI->getValueOperand();
+ Value *StorePtr = SI->getPointerOperand();
- // Check if the result of the instruction is live of the loop.
- bool LiveOutLoop = false;
- for (User *U : Inst->users()) {
- if ((cast<Instruction>(U))->getParent() != LoopEntry) {
- LiveOutLoop = true;
- break;
- }
- }
+ // Reject stores that are so large that they overflow an unsigned.
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
+ uint64_t SizeInBits = DL.getTypeSizeInBits(StoredVal->getType());
+ if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
+ return false;
- if (LiveOutLoop) {
- CountInst = Inst;
- CountPhi = Phi;
- break;
- }
- }
+ // See if the pointer expression is an AddRec like {base,+,1} on the current
+ // loop, which indicates a strided store. If we have something else, it's a
+ // random store we can't handle.
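+ // For example, in "for (i = 0; i < n; ++i) p[i] = v;" the store address has
+ // the affine SCEV {p,+,4} (for a 4-byte element), which is exactly the form
+ // matched below.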
+ const SCEVAddRecExpr *StoreEv =
+ dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
+ if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
+ return false;
- if (!CountInst)
- return false;
- }
+ // Check to see if the stride matches the size of the store. If so, then we
+ // know that every byte is touched in the loop.
+ unsigned StoreSize = (unsigned)SizeInBits >> 3;
+ const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
- // step 5: check if the precondition is in this form:
- // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
- {
- auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
- Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
- if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
- return false;
+ if (!Stride || StoreSize != Stride->getValue()->getValue()) {
+ // TODO: Could also handle negative stride here someday, that will require
+ // the validity check in mayLoopAccessLocation to be updated though.
+ // Enable this to print exact negative strides.
+ if (0 && Stride && StoreSize == -Stride->getValue()->getValue()) {
+ dbgs() << "NEGATIVE STRIDE: " << *SI << "\n";
+ dbgs() << "BB: " << *SI->getParent();
+ }
- CntInst = CountInst;
- CntPhi = CountPhi;
- Var = T;
+ return false;
}
- return true;
-}
-
-void NclPopcountRecognize::transform(Instruction *CntInst, PHINode *CntPhi,
- Value *Var) {
-
- ScalarEvolution *SE = LIR.getScalarEvolution();
- TargetLibraryInfo *TLI = LIR.getTargetLibraryInfo();
- BasicBlock *PreHead = CurLoop->getLoopPreheader();
- auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
- const DebugLoc DL = CntInst->getDebugLoc();
-
- // Assuming before transformation, the loop is following:
- // if (x) // the precondition
- // do { cnt++; x &= x - 1; } while(x);
+ // See if we can optimize just this store in isolation.
+ if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
+ StoredVal, SI, StoreEv, BECount))
+ return true;
- // Step 1: Insert the ctpop instruction at the end of the precondition block
- IRBuilderTy Builder(PreCondBr);
- Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
- {
- PopCnt = createPopcntIntrinsic(Builder, Var, DL);
- NewCount = PopCntZext =
- Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
+ // If the stored value is a strided load in the same loop with the same stride
+ // this may be transformable into a memcpy. This kicks in for stuff like
+ // for (i) A[i] = B[i];
+ if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
+ const SCEVAddRecExpr *LoadEv =
+ dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
+ if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
+ StoreEv->getOperand(1) == LoadEv->getOperand(1) && LI->isSimple())
+ if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
+ return true;
+ }
+ // errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";
- if (NewCount != PopCnt)
- (cast<Instruction>(NewCount))->setDebugLoc(DL);
+ return false;
+}
- // TripCnt is exactly the number of iterations the loop has
- TripCnt = NewCount;
+/// processLoopMemSet - See if this memset can be promoted to a large memset.
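+///
+/// For example, a loop that performs a fixed-size memset on each iteration and
+/// advances the destination pointer by exactly that size can be folded into a
+/// single, larger memset covering the whole range.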
+bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
+ const SCEV *BECount) {
+ // We can only handle non-volatile memsets with a constant size.
+ if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
+ return false;
- // If the population counter's initial value is not zero, insert Add Inst.
- Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
- ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
- if (!InitConst || !InitConst->isZero()) {
- NewCount = Builder.CreateAdd(NewCount, CntInitVal);
- (cast<Instruction>(NewCount))->setDebugLoc(DL);
- }
- }
+ // If we're not allowed to hack on memset, we fail.
+ if (!TLI->has(LibFunc::memset))
+ return false;
- // Step 2: Replace the precondition from "if(x == 0) goto loop-exit" to
- // "if(NewCount == 0) loop-exit". Withtout this change, the intrinsic
- // function would be partial dead code, and downstream passes will drag
- // it back from the precondition block to the preheader.
- {
- ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
+ Value *Pointer = MSI->getDest();
- Value *Opnd0 = PopCntZext;
- Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
- if (PreCond->getOperand(0) != Var)
- std::swap(Opnd0, Opnd1);
+ // See if the pointer expression is an AddRec like {base,+,1} on the current
+ // loop, which indicates a strided store. If we have something else, it's a
+ // random store we can't handle.
+ const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
+ if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
+ return false;
- ICmpInst *NewPreCond = cast<ICmpInst>(
- Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
- PreCondBr->setCondition(NewPreCond);
+ // Reject memsets that are so large that they overflow an unsigned.
+ uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
+ if ((SizeInBytes >> 32) != 0)
+ return false;
- RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
- }
+ // Check to see if the stride matches the size of the memset. If so, then we
+ // know that every byte is touched in the loop.
+ const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
- // Step 3: Note that the population count is exactly the trip count of the
- // loop in question, which enble us to to convert the loop from noncountable
- // loop into a countable one. The benefit is twofold:
- //
- // - If the loop only counts population, the entire loop become dead after
- // the transformation. It is lots easier to prove a countable loop dead
- // than to prove a noncountable one. (In some C dialects, a infite loop
- // isn't dead even if it computes nothing useful. In general, DCE needs
- // to prove a noncountable loop finite before safely delete it.)
- //
- // - If the loop also performs something else, it remains alive.
- // Since it is transformed to countable form, it can be aggressively
- // optimized by some optimizations which are in general not applicable
- // to a noncountable loop.
- //
- // After this step, this loop (conceptually) would look like following:
- // newcnt = __builtin_ctpop(x);
- // t = newcnt;
- // if (x)
- // do { cnt++; x &= x-1; t--) } while (t > 0);
- BasicBlock *Body = *(CurLoop->block_begin());
- {
- auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
- ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
- Type *Ty = TripCnt->getType();
+ // TODO: Could also handle negative stride here someday, that will require the
+ // validity check in mayLoopAccessLocation to be updated though.
+ if (!Stride || MSI->getLength() != Stride->getValue())
+ return false;
- PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", Body->begin());
+ return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
+ MSI->getAlignment(), MSI->getValue(), MSI, Ev,
+ BECount);
+}
- Builder.SetInsertPoint(LbCond);
- Value *Opnd1 = cast<Value>(TcPhi);
- Value *Opnd2 = cast<Value>(ConstantInt::get(Ty, 1));
- Instruction *TcDec = cast<Instruction>(
- Builder.CreateSub(Opnd1, Opnd2, "tcdec", false, true));
+/// mayLoopAccessLocation - Return true if the specified loop might access the
+/// specified pointer location, which is a loop-strided access. The 'Access'
+/// argument specifies what the verboten forms of access are (read or write).
+static bool mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
+ const SCEV *BECount, unsigned StoreSize,
+ AliasAnalysis &AA,
+ Instruction *IgnoredStore) {
+ // Get the location that may be stored across the loop. Since the access is
+ // strided positively through memory, we say that the modified location starts
+ // at the pointer and has infinite size.
+ uint64_t AccessSize = MemoryLocation::UnknownSize;
- TcPhi->addIncoming(TripCnt, PreHead);
- TcPhi->addIncoming(TcDec, Body);
+ // If the loop iterates a fixed number of times, we can refine the access size
+ // to be exactly the size of the memset, which is (BECount+1)*StoreSize
+ if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
+ AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
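+ // (For example, a backedge-taken count of 99 with a 4-byte store touches
+ // exactly (99 + 1) * 4 = 400 bytes.)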
- CmpInst::Predicate Pred =
- (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
- LbCond->setPredicate(Pred);
- LbCond->setOperand(0, TcDec);
- LbCond->setOperand(1, cast<Value>(ConstantInt::get(Ty, 0)));
- }
+ // TODO: For this to be really effective, we have to dive into the pointer
+ // operand in the store. Store to &A[i] of 100 will always return may alias
+ // with store of &A[100]; we need StoreLoc to be "A" with size of 100,
+ // which will then no-alias a store to &A[100].
+ MemoryLocation StoreLoc(Ptr, AccessSize);
- // Step 4: All the references to the original population counter outside
- // the loop are replaced with the NewCount -- the value returned from
- // __builtin_ctpop().
- CntInst->replaceUsesOutsideBlock(NewCount, Body);
+ for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
+ ++BI)
+ for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
+ if (&*I != IgnoredStore && (AA.getModRefInfo(I, StoreLoc) & Access))
+ return true;
- // step 5: Forget the "non-computable" trip-count SCEV associated with the
- // loop. The loop would otherwise not be deleted even if it becomes empty.
- SE->forgetLoop(CurLoop);
+ return false;
}
-CallInst *NclPopcountRecognize::createPopcntIntrinsic(IRBuilderTy &IRBuilder,
- Value *Val, DebugLoc DL) {
- Value *Ops[] = {Val};
- Type *Tys[] = {Val->getType()};
-
- Module *M = (*(CurLoop->block_begin()))->getParent()->getParent();
- Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
- CallInst *CI = IRBuilder.CreateCall(Func, Ops);
- CI->setDebugLoc(DL);
+/// getMemSetPatternValue - If a strided store of the specified value is safe to
+/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
+/// be passed in. Otherwise, return null.
+///
+/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
+/// just replicate their input array and then pass on to memset_pattern16.
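+///
+/// For example, a store of the i32 constant 0x01020304 yields a ConstantArray
+/// of four copies of that i32, i.e. a full 16-byte pattern.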
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
+ // If the value isn't a constant, we can't promote it to being in a constant
+ // array. We could theoretically do a store to an alloca or something, but
+ // that doesn't seem worthwhile.
+ Constant *C = dyn_cast<Constant>(V);
+ if (!C)
+ return nullptr;
- return CI;
-}
+ // Only handle simple values that are a power of two bytes in size.
+ uint64_t Size = DL.getTypeSizeInBits(V->getType());
+ if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
+ return nullptr;
-/// recognize - detect population count idiom in a non-countable loop. If
-/// detected, transform the relevant code to popcount intrinsic function
-/// call, and return true; otherwise, return false.
-bool NclPopcountRecognize::recognize() {
- if (!LIR.getTargetTransformInfo())
- return false;
+ // Don't care enough about darwin/ppc to implement this.
+ if (DL.isBigEndian())
+ return nullptr;
- LIR.getScalarEvolution();
+ // Convert to size in bytes.
+ Size /= 8;
- if (!preliminaryScreen())
- return false;
+ // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
+ // if the top and bottom are the same (e.g. for vectors and large integers).
+ if (Size > 16)
+ return nullptr;
- Instruction *CntInst;
- PHINode *CntPhi;
- Value *Val;
- if (!detectIdiom(CntInst, CntPhi, Val))
- return false;
+ // If the constant is exactly 16 bytes, just use it.
+ if (Size == 16)
+ return C;
- transform(CntInst, CntPhi, Val);
- return true;
+ // Otherwise, we'll use an array of the constants.
+ unsigned ArraySize = 16 / Size;
+ ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
+ return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}
-//===----------------------------------------------------------------------===//
-//
-// Implementation of LoopIdiomRecognize
-//
-//===----------------------------------------------------------------------===//
-
-bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
- if (skipOptnoneFunction(L))
- return false;
+/// processLoopStridedStore - We see a strided store of some value. If we can
+/// transform this into a memset or memset_pattern in the loop preheader, do so.
+bool LoopIdiomRecognize::processLoopStridedStore(
+ Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
+ Value *StoredVal, Instruction *TheStore, const SCEVAddRecExpr *Ev,
+ const SCEV *BECount) {
- CurLoop = L;
+ // If the stored value is a byte-wise value (like i32 -1), then it may be
+ // turned into a memset of i8 -1, assuming that all the consecutive bytes
+ // are stored. A store of i32 0x01020304 can never be turned into a memset,
+ // but it can be turned into memset_pattern if the target supports it.
+ Value *SplatValue = isBytewiseValue(StoredVal);
+ Constant *PatternValue = nullptr;
+ auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
+ unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
- // If the loop could not be converted to canonical form, it must have an
- // indirectbr in it, just give up.
- if (!L->getLoopPreheader())
+ // If we're allowed to form a memset, and the stored value would be acceptable
+ // for memset, use it.
+ if (SplatValue && TLI->has(LibFunc::memset) &&
+ // Verify that the stored value is loop invariant. If not, we can't
+ // promote the memset.
+ CurLoop->isLoopInvariant(SplatValue)) {
+ // Keep and use SplatValue.
+ PatternValue = nullptr;
+ } else if (DestAS == 0 && TLI->has(LibFunc::memset_pattern16) &&
+ (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
+ // Don't create memset_pattern16s with address spaces.
+ // It looks like we can use PatternValue!
+ SplatValue = nullptr;
+ } else {
+ // Otherwise, this isn't an idiom we can transform. For example, we can't
+ // do anything with a 3-byte store.
return false;
+ }
- // Disable loop idiom recognition if the function's name is a common idiom.
- StringRef Name = L->getHeader()->getParent()->getName();
- if (Name == "memset" || Name == "memcpy")
- return false;
-
- SE = &getAnalysis<ScalarEvolution>();
- if (SE->hasLoopInvariantBackedgeTakenCount(L))
- return runOnCountableLoop();
- return runOnNoncountableLoop();
-}
-
-bool LoopIdiomRecognize::runOnCountableLoop() {
- const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
- assert(!isa<SCEVCouldNotCompute>(BECount) &&
- "runOnCountableLoop() called on a loop without a predictable"
- "backedge-taken count");
-
- // If this loop executes exactly one time, then it should be peeled, not
- // optimized by this pass.
- if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
- if (BECst->getValue()->getValue() == 0)
- return false;
-
- // set DT
- (void)getDominatorTree();
+ // The trip count of the loop and the base pointer of the addrec SCEV are
+ // guaranteed to be loop invariant, which means that they should dominate the
+ // header. This allows us to insert code for them in the preheader.
+ BasicBlock *Preheader = CurLoop->getLoopPreheader();
+ IRBuilder<> Builder(Preheader->getTerminator());
+ SCEVExpander Expander(*SE, DL, "loop-idiom");
- LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
- TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
- // set TLI
- (void)getTargetLibraryInfo();
+ // Okay, we have a strided store "p[i]" of a splattable value. We can turn
+ // this into a memset in the loop preheader now if we want. However, this
+ // would be unsafe to do if there is anything else in the loop that may read
+ // or write to the aliased location. Check for any overlap by generating the
+ // base pointer and checking the region.
+ Value *BasePtr = Expander.expandCodeFor(Ev->getStart(), DestInt8PtrTy,
+ Preheader->getTerminator());
- SmallVector<BasicBlock *, 8> ExitBlocks;
- CurLoop->getUniqueExitBlocks(ExitBlocks);
+ if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
+ getAnalysis<AliasAnalysis>(), TheStore)) {
+ Expander.clear();
+ // If we generated new code for the base pointer, clean up.
+ RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
+ return false;
+ }
- DEBUG(dbgs() << "loop-idiom Scanning: F["
- << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
- << CurLoop->getHeader()->getName() << "\n");
+ // Okay, everything looks good, insert the memset.
- bool MadeChange = false;
- // Scan all the blocks in the loop that are not in subloops.
- for (auto *BB : CurLoop->getBlocks()) {
- // Ignore blocks in subloops.
- if (LI.getLoopFor(BB) != CurLoop)
- continue;
+ // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
+ // pointer size if it isn't already.
+ Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
+ BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
- MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
+ const SCEV *NumBytesS =
+ SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1), SCEV::FlagNUW);
+ if (StoreSize != 1) {
+ NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
+ SCEV::FlagNUW);
}
- return MadeChange;
-}
-/// runOnLoopBlock - Process the specified block, which lives in a counted loop
-/// with the specified backedge count. This block is known to be in the current
-/// loop and not in any subloops.
-bool LoopIdiomRecognize::runOnLoopBlock(
- BasicBlock *BB, const SCEV *BECount,
- SmallVectorImpl<BasicBlock *> &ExitBlocks) {
- // We can only promote stores in this block if they are unconditionally
- // executed in the loop. For a block to be unconditionally executed, it has
- // to dominate all the exit blocks of the loop. Verify this now.
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
- if (!DT->dominates(BB, ExitBlocks[i]))
- return false;
-
- bool MadeChange = false;
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
- Instruction *Inst = I++;
- // Look for store instructions, which may be optimized to memset/memcpy.
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- WeakVH InstPtr(I);
- if (!processLoopStore(SI, BECount))
- continue;
- MadeChange = true;
+ Value *NumBytes =
+ Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
- // If processing the store invalidated our iterator, start over from the
- // top of the block.
- if (!InstPtr)
- I = BB->begin();
- continue;
- }
+ CallInst *NewCall;
+ if (SplatValue) {
+ NewCall =
+ Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
+ } else {
+ // Everything is emitted in the default address space
+ Type *Int8PtrTy = DestInt8PtrTy;
- // Look for memset instructions, which may be optimized to a larger memset.
- if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
- WeakVH InstPtr(I);
- if (!processLoopMemSet(MSI, BECount))
- continue;
- MadeChange = true;
+ Module *M = TheStore->getParent()->getParent()->getParent();
+ Value *MSP =
+ M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
+ Int8PtrTy, Int8PtrTy, IntPtr, (void *)nullptr);
- // If processing the memset invalidated our iterator, start over from the
- // top of the block.
- if (!InstPtr)
- I = BB->begin();
- continue;
- }
+ // Otherwise we should form a memset_pattern16. PatternValue is known to be
+ // a constant array of 16 bytes. Plop the value into a mergeable global.
+ GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
+ GlobalValue::PrivateLinkage,
+ PatternValue, ".memset_pattern");
+ GV->setUnnamedAddr(true); // Ok to merge these.
+ GV->setAlignment(16);
+ Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
+ NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
}
- return MadeChange;
-}
-
-/// processLoopStore - See if this store can be promoted to a memset or memcpy.
-bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
- if (!SI->isSimple())
- return false;
+ DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
+ << " from store to: " << *Ev << " at: " << *TheStore << "\n");
+ NewCall->setDebugLoc(TheStore->getDebugLoc());
- Value *StoredVal = SI->getValueOperand();
- Value *StorePtr = SI->getPointerOperand();
+ // Okay, the memset has been formed. Zap the original store and anything that
+ // feeds into it.
+ deleteDeadInstruction(TheStore, TLI);
+ ++NumMemSet;
+ return true;
+}
- // Reject stores that are so large that they overflow an unsigned.
- auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
- uint64_t SizeInBits = DL.getTypeSizeInBits(StoredVal->getType());
- if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
+/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
+/// same-strided load.
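+///
+/// This kicks in for loops such as "for (i) A[i] = B[i];", which can be
+/// rewritten as a single memcpy from B to A.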
+bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
+ StoreInst *SI, unsigned StoreSize, const SCEVAddRecExpr *StoreEv,
+ const SCEVAddRecExpr *LoadEv, const SCEV *BECount) {
+ // If we're not allowed to form memcpy, we fail.
+ if (!TLI->has(LibFunc::memcpy))
return false;
- // See if the pointer expression is an AddRec like {base,+,1} on the current
- // loop, which indicates a strided store. If we have something else, it's a
- // random store we can't handle.
- const SCEVAddRecExpr *StoreEv =
- dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
- if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
- return false;
+ LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
- // Check to see if the stride matches the size of the store. If so, then we
- // know that every byte is touched in the loop.
- unsigned StoreSize = (unsigned)SizeInBits >> 3;
- const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
+ // The trip count of the loop and the base pointer of the addrec SCEV are
+ // guaranteed to be loop invariant, which means that they should dominate the
+ // header. This allows us to insert code for them in the preheader.
+ BasicBlock *Preheader = CurLoop->getLoopPreheader();
+ IRBuilder<> Builder(Preheader->getTerminator());
+ const DataLayout &DL = Preheader->getModule()->getDataLayout();
+ SCEVExpander Expander(*SE, DL, "loop-idiom");
- if (!Stride || StoreSize != Stride->getValue()->getValue()) {
- // TODO: Could also handle negative stride here someday, that will require
- // the validity check in mayLoopAccessLocation to be updated though.
- // Enable this to print exact negative strides.
- if (0 && Stride && StoreSize == -Stride->getValue()->getValue()) {
- dbgs() << "NEGATIVE STRIDE: " << *SI << "\n";
- dbgs() << "BB: " << *SI->getParent();
- }
+ // Okay, we have a strided store "p[i]" of a loaded value. We can turn
+ // this into a memcpy in the loop preheader now if we want. However, this
+ // would be unsafe to do if there is anything else in the loop that may read
+ // or write the memory region we're storing to. This includes the load that
+ // feeds the stores. Check for an alias by generating the base address and
+ // checking everything.
+ Value *StoreBasePtr = Expander.expandCodeFor(
+ StoreEv->getStart(), Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
+ Preheader->getTerminator());
+ if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
+ StoreSize, getAnalysis<AliasAnalysis>(), SI)) {
+ Expander.clear();
+ // If we generated new code for the base pointer, clean up.
+ RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
return false;
}
- // See if we can optimize just this store in isolation.
- if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
- StoredVal, SI, StoreEv, BECount))
- return true;
+ // For a memcpy, we have to make sure that the input array is not being
+ // mutated by the loop.
+ Value *LoadBasePtr = Expander.expandCodeFor(
+ LoadEv->getStart(), Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
+ Preheader->getTerminator());
- // If the stored value is a strided load in the same loop with the same stride
- // this this may be transformable into a memcpy. This kicks in for stuff like
- // for (i) A[i] = B[i];
- if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
- const SCEVAddRecExpr *LoadEv =
- dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
- if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
- StoreEv->getOperand(1) == LoadEv->getOperand(1) && LI->isSimple())
- if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
- return true;
+ if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
+ getAnalysis<AliasAnalysis>(), SI)) {
+ Expander.clear();
+ // If we generated new code for the base pointer, clean up.
+ RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
+ RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
+ return false;
}
- // errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";
- return false;
-}
+ // Okay, everything is safe, we can transform this!
-/// processLoopMemSet - See if this memset can be promoted to a large memset.
-bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
- const SCEV *BECount) {
- // We can only handle non-volatile memsets with a constant size.
- if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
- return false;
+ // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
+ // pointer size if it isn't already.
+ Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
+ BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
- // If we're not allowed to hack on memset, we fail.
- if (!TLI->has(LibFunc::memset))
- return false;
+ const SCEV *NumBytesS =
+ SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1), SCEV::FlagNUW);
+ if (StoreSize != 1)
+ NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
+ SCEV::FlagNUW);
- Value *Pointer = MSI->getDest();
+ Value *NumBytes =
+ Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
- // See if the pointer expression is an AddRec like {base,+,1} on the current
- // loop, which indicates a strided store. If we have something else, it's a
- // random store we can't handle.
- const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
- if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
- return false;
+ CallInst *NewCall =
+ Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
+ std::min(SI->getAlignment(), LI->getAlignment()));
+ NewCall->setDebugLoc(SI->getDebugLoc());
- // Reject memsets that are so large that they overflow an unsigned.
- uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
- if ((SizeInBytes >> 32) != 0)
- return false;
+ DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
+ << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
+ << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
- // Check to see if the stride matches the size of the memset. If so, then we
- // know that every byte is touched in the loop.
- const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
+ // Okay, the memcpy has been formed. Zap the original store and anything that
+ // feeds into it.
+ deleteDeadInstruction(SI, TLI);
+ ++NumMemCpy;
+ return true;
+}
- // TODO: Could also handle negative stride here someday, that will require the
- // validity check in mayLoopAccessLocation to be updated though.
- if (!Stride || MSI->getLength() != Stride->getValue())
- return false;
+bool LoopIdiomRecognize::runOnNoncountableLoop() {
+ if (recognizePopcount())
+ return true;
- return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
- MSI->getAlignment(), MSI->getValue(), MSI, Ev,
- BECount);
+ return false;
}
-/// mayLoopAccessLocation - Return true if the specified loop might access the
-/// specified pointer location, which is a loop-strided access. The 'Access'
-/// argument specifies what the verboten forms of access are (read or write).
-static bool mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
- const SCEV *BECount, unsigned StoreSize,
- AliasAnalysis &AA,
- Instruction *IgnoredStore) {
- // Get the location that may be stored across the loop. Since the access is
- // strided positively through memory, we say that the modified location starts
- // at the pointer and has infinite size.
- uint64_t AccessSize = MemoryLocation::UnknownSize;
+/// Check if the given conditional branch is based on a comparison between
+/// a variable and zero, such that control branches to the loop entry when the
+/// variable is non-zero. If the branch matches this behavior, the variable
+/// involved in the comparison is returned. This function is used to check
+/// whether the precondition and postcondition of the loop are in a desirable
+/// form.
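+///
+/// For example, given a terminator of the form
+///   %cmp = icmp ne i32 %x, 0
+///   br i1 %cmp, label %loop.entry, label %other
+/// with \p LoopEntry being %loop.entry, the returned value is %x.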
+static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
+ if (!BI || !BI->isConditional())
+ return nullptr;
- // If the loop iterates a fixed number of times, we can refine the access size
- // to be exactly the size of the memset, which is (BECount+1)*StoreSize
- if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
- AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
+ ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
+ if (!Cond)
+ return nullptr;
- // TODO: For this to be really effective, we have to dive into the pointer
- // operand in the store. Store to &A[i] of 100 will always return may alias
- // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
- // which will then no-alias a store to &A[100].
- MemoryLocation StoreLoc(Ptr, AccessSize);
+ ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
+ if (!CmpZero || !CmpZero->isZero())
+ return nullptr;
- for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
- ++BI)
- for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
- if (&*I != IgnoredStore && (AA.getModRefInfo(I, StoreLoc) & Access))
- return true;
+ ICmpInst::Predicate Pred = Cond->getPredicate();
+ if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
+ (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
+ return Cond->getOperand(0);
- return false;
+ return nullptr;
}
-/// getMemSetPatternValue - If a strided store of the specified value is safe to
-/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
-/// be passed in. Otherwise, return null.
+/// Return true iff the idiom is detected in the loop.
///
-/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
-/// just replicate their input array and then pass on to memset_pattern16.
-static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
- // If the value isn't a constant, we can't promote it to being in a constant
- // array. We could theoretically do a store to an alloca or something, but
- // that doesn't seem worthwhile.
- Constant *C = dyn_cast<Constant>(V);
- if (!C)
- return nullptr;
+/// Additionally:
+/// 1) \p CntInst is set to the instruction counting the population bit.
+/// 2) \p CntPhi is set to the corresponding phi node.
+/// 3) \p Var is set to the value whose population bits are being counted.
+///
+/// The core idiom we are trying to detect is:
+/// \code
+/// if (x0 != 0)
+/// goto loop-exit // the precondition of the loop
+/// cnt0 = init-val;
+/// do {
+/// x1 = phi (x0, x2);
+/// cnt1 = phi(cnt0, cnt2);
+///
+/// cnt2 = cnt1 + 1;
+/// ...
+/// x2 = x1 & (x1 - 1);
+/// ...
+/// } while(x != 0);
+///
+/// loop-exit:
+/// \endcode
+static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
+ Instruction *&CntInst, PHINode *&CntPhi,
+ Value *&Var) {
+ // step 1: Check to see if the look-back branch matches this pattern:
+ // "if (a!=0) goto loop-entry".
+ BasicBlock *LoopEntry;
+ Instruction *DefX2, *CountInst;
+ Value *VarX1, *VarX0;
+ PHINode *PhiX, *CountPhi;
- // Only handle simple values that are a power of two bytes in size.
- uint64_t Size = DL.getTypeSizeInBits(V->getType());
- if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
- return nullptr;
+ DefX2 = CountInst = nullptr;
+ VarX1 = VarX0 = nullptr;
+ PhiX = CountPhi = nullptr;
+ LoopEntry = *(CurLoop->block_begin());
- // Don't care enough about darwin/ppc to implement this.
- if (DL.isBigEndian())
- return nullptr;
+ // step 1: Check if the loop-back branch is in desirable form.
+ {
+ if (Value *T = matchCondition(
+ dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
+ DefX2 = dyn_cast<Instruction>(T);
+ else
+ return false;
+ }
- // Convert to size in bytes.
- Size /= 8;
+ // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
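+ // Both operand orders of the 'and' are accepted, and the decrement may be
+ // written either as "x1 - 1" or as "x1 + (-1)".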
+ {
+ if (!DefX2 || DefX2->getOpcode() != Instruction::And)
+ return false;
- // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
- // if the top and bottom are the same (e.g. for vectors and large integers).
- if (Size > 16)
- return nullptr;
+ BinaryOperator *SubOneOp;
- // If the constant is exactly 16 bytes, just use it.
- if (Size == 16)
- return C;
+ if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
+ VarX1 = DefX2->getOperand(1);
+ else {
+ VarX1 = DefX2->getOperand(0);
+ SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
+ }
+ if (!SubOneOp)
+ return false;
- // Otherwise, we'll use an array of the constants.
- unsigned ArraySize = 16 / Size;
- ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
- return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
-}
+ Instruction *SubInst = cast<Instruction>(SubOneOp);
+ ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
+ if (!Dec ||
+ !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
+ (SubInst->getOpcode() == Instruction::Add &&
+ Dec->isAllOnesValue()))) {
+ return false;
+ }
+ }
-/// processLoopStridedStore - We see a strided store of some value. If we can
-/// transform this into a memset or memset_pattern in the loop preheader, do so.
-bool LoopIdiomRecognize::processLoopStridedStore(
- Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
- Value *StoredVal, Instruction *TheStore, const SCEVAddRecExpr *Ev,
- const SCEV *BECount) {
+ // step 3: Check the recurrence of variable X
+ {
+ PhiX = dyn_cast<PHINode>(VarX1);
+ if (!PhiX ||
+ (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) {
+ return false;
+ }
+ }
- // If the stored value is a byte-wise value (like i32 -1), then it may be
- // turned into a memset of i8 -1, assuming that all the consecutive bytes
- // are stored. A store of i32 0x01020304 can never be turned into a memset,
- // but it can be turned into memset_pattern if the target supports it.
- Value *SplatValue = isBytewiseValue(StoredVal);
- Constant *PatternValue = nullptr;
- auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
- unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
+ // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
+ {
+ CountInst = nullptr;
+ for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI(),
+ IterE = LoopEntry->end();
+ Iter != IterE; Iter++) {
+ Instruction *Inst = Iter;
+ if (Inst->getOpcode() != Instruction::Add)
+ continue;
- // If we're allowed to form a memset, and the stored value would be acceptable
- // for memset, use it.
- if (SplatValue && TLI->has(LibFunc::memset) &&
- // Verify that the stored value is loop invariant. If not, we can't
- // promote the memset.
- CurLoop->isLoopInvariant(SplatValue)) {
- // Keep and use SplatValue.
- PatternValue = nullptr;
- } else if (DestAS == 0 && TLI->has(LibFunc::memset_pattern16) &&
- (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
- // Don't create memset_pattern16s with address spaces.
- // It looks like we can use PatternValue!
- SplatValue = nullptr;
- } else {
- // Otherwise, this isn't an idiom we can transform. For example, we can't
- // do anything with a 3-byte store.
- return false;
+ ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
+ if (!Inc || !Inc->isOne())
+ continue;
+
+ PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0));
+ if (!Phi || Phi->getParent() != LoopEntry)
+ continue;
+
+ // Check if the result of the instruction is live out of the loop.
+ bool LiveOutLoop = false;
+ for (User *U : Inst->users()) {
+ if ((cast<Instruction>(U))->getParent() != LoopEntry) {
+ LiveOutLoop = true;
+ break;
+ }
+ }
+
+ if (LiveOutLoop) {
+ CountInst = Inst;
+ CountPhi = Phi;
+ break;
+ }
+ }
+
+ if (!CountInst)
+ return false;
}
- // The trip count of the loop and the base pointer of the addrec SCEV is
- // guaranteed to be loop invariant, which means that it should dominate the
- // header. This allows us to insert code for it in the preheader.
- BasicBlock *Preheader = CurLoop->getLoopPreheader();
- IRBuilder<> Builder(Preheader->getTerminator());
- SCEVExpander Expander(*SE, DL, "loop-idiom");
+ // step 5: check if the precondition is in this form:
+ // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
+ {
+ auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
+ Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
+ if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
+ return false;
- Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
+ CntInst = CountInst;
+ CntPhi = CountPhi;
+ Var = T;
+ }
+
+ return true;
+}
- // Okay, we have a strided store "p[i]" of a splattable value. We can turn
- // this into a memset in the loop preheader now if we want. However, this
- // would be unsafe to do if there is anything else in the loop that may read
- // or write to the aliased location. Check for any overlap by generating the
- // base pointer and checking the region.
- Value *BasePtr = Expander.expandCodeFor(Ev->getStart(), DestInt8PtrTy,
- Preheader->getTerminator());
+/// Recognizes a population count idiom in a non-countable loop.
+///
+/// If detected, transforms the relevant code to issue the popcount intrinsic
+/// function call, and returns true; otherwise, returns false.
+bool LoopIdiomRecognize::recognizePopcount() {
+ (void)getScalarEvolution();
+ (void)getTargetLibraryInfo();
+ (void)getTargetTransformInfo();
- if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
- getAnalysis<AliasAnalysis>(), TheStore)) {
- Expander.clear();
- // If we generated new code for the base pointer, clean up.
- RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
+ if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
return false;
- }
- // Okay, everything looks good, insert the memset.
+ // Counting the population is usually done with only a few arithmetic
+ // instructions. Such instructions can easily be "absorbed" by vacant slots
+ // in a non-compact loop. Therefore, recognizing the popcount idiom only
+ // makes sense in a compact loop.
- // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
- // pointer size if it isn't already.
- Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
- BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
+ // Give up if the loop has multiple blocks or multiple backedges.
+ if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
+ return false;
- const SCEV *NumBytesS =
- SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1), SCEV::FlagNUW);
- if (StoreSize != 1) {
- NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
- SCEV::FlagNUW);
+ BasicBlock *LoopBody = *(CurLoop->block_begin());
+ if (LoopBody->size() >= 20) {
+ // The loop is too big, bail out.
+ return false;
}
- Value *NumBytes =
- Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
+ // It should have a preheader containing nothing but an unconditional branch.
+ BasicBlock *PH = CurLoop->getLoopPreheader();
+ if (!PH)
+ return false;
+ if (&PH->front() != PH->getTerminator())
+ return false;
+ auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
+ if (!EntryBI || EntryBI->isConditional())
+ return false;
- CallInst *NewCall;
- if (SplatValue) {
- NewCall =
- Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
- } else {
- // Everything is emitted in default address space
- Type *Int8PtrTy = DestInt8PtrTy;
+ // It should have a precondition block where the generated popcount intrinsic
+ // function can be inserted.
+ auto *PreCondBB = PH->getSinglePredecessor();
+ if (!PreCondBB)
+ return false;
+ auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
+ if (!PreCondBI || PreCondBI->isUnconditional())
+ return false;
- Module *M = TheStore->getParent()->getParent()->getParent();
- Value *MSP =
- M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
- Int8PtrTy, Int8PtrTy, IntPtr, (void *)nullptr);
+ Instruction *CntInst;
+ PHINode *CntPhi;
+ Value *Val;
+ if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
+ return false;
- // Otherwise we should form a memset_pattern16. PatternValue is known to be
- // an constant array of 16-bytes. Plop the value into a mergable global.
- GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
- GlobalValue::PrivateLinkage,
- PatternValue, ".memset_pattern");
- GV->setUnnamedAddr(true); // Ok to merge these.
- GV->setAlignment(16);
- Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
- NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
- }
+ transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
+ return true;
+}
- DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
- << " from store to: " << *Ev << " at: " << *TheStore << "\n");
- NewCall->setDebugLoc(TheStore->getDebugLoc());
+static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
+ DebugLoc DL) {
+ Value *Ops[] = {Val};
+ Type *Tys[] = {Val->getType()};
- // Okay, the memset has been formed. Zap the original store and anything that
- // feeds into it.
- deleteDeadInstruction(TheStore, TLI);
- ++NumMemSet;
- return true;
+ Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
+ Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
+ CallInst *CI = IRBuilder.CreateCall(Func, Ops);
+ CI->setDebugLoc(DL);
+
+ return CI;
}
-/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
-/// same-strided load.
-bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
- StoreInst *SI, unsigned StoreSize, const SCEVAddRecExpr *StoreEv,
- const SCEVAddRecExpr *LoadEv, const SCEV *BECount) {
- // If we're not allowed to form memcpy, we fail.
- if (!TLI->has(LibFunc::memcpy))
- return false;
+void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
+ Instruction *CntInst,
+ PHINode *CntPhi, Value *Var) {
+ BasicBlock *PreHead = CurLoop->getLoopPreheader();
+ auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
+ const DebugLoc DL = CntInst->getDebugLoc();
- LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
+ // Assume that, before the transformation, the loop looks like this:
+ // if (x) // the precondition
+ // do { cnt++; x &= x - 1; } while(x);
- // The trip count of the loop and the base pointer of the addrec SCEV is
- // guaranteed to be loop invariant, which means that it should dominate the
- // header. This allows us to insert code for it in the preheader.
- BasicBlock *Preheader = CurLoop->getLoopPreheader();
- IRBuilder<> Builder(Preheader->getTerminator());
- const DataLayout &DL = Preheader->getModule()->getDataLayout();
- SCEVExpander Expander(*SE, DL, "loop-idiom");
+ // Step 1: Insert the ctpop instruction at the end of the precondition block
+ IRBuilder<> Builder(PreCondBr);
+ Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
+ {
+ PopCnt = createPopcntIntrinsic(Builder, Var, DL);
+ NewCount = PopCntZext =
+ Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
- // Okay, we have a strided store "p[i]" of a loaded value. We can turn
- // this into a memcpy in the loop preheader now if we want. However, this
- // would be unsafe to do if there is anything else in the loop that may read
- // or write the memory region we're storing to. This includes the load that
- // feeds the stores. Check for an alias by generating the base address and
- // checking everything.
- Value *StoreBasePtr = Expander.expandCodeFor(
- StoreEv->getStart(), Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
- Preheader->getTerminator());
+ if (NewCount != PopCnt)
+ (cast<Instruction>(NewCount))->setDebugLoc(DL);
- if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
- StoreSize, getAnalysis<AliasAnalysis>(), SI)) {
- Expander.clear();
- // If we generated new code for the base pointer, clean up.
- RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
- return false;
+ // TripCnt is exactly the number of iterations the loop has
+ TripCnt = NewCount;
+
+ // If the population counter's initial value is not zero, insert Add Inst.
+ Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
+ ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
+ if (!InitConst || !InitConst->isZero()) {
+ NewCount = Builder.CreateAdd(NewCount, CntInitVal);
+ (cast<Instruction>(NewCount))->setDebugLoc(DL);
+ }
}
- // For a memcpy, we have to make sure that the input array is not being
- // mutated by the loop.
- Value *LoadBasePtr = Expander.expandCodeFor(
- LoadEv->getStart(), Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
- Preheader->getTerminator());
+ // Step 2: Change the precondition from "if(x == 0) goto loop-exit" to
+ // "if(NewCount == 0) loop-exit". Without this change, the intrinsic call
+ // would be partially dead code, and downstream passes would drag it back
+ // from the precondition block to the preheader.
+ {
+ ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
- if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
- getAnalysis<AliasAnalysis>(), SI)) {
- Expander.clear();
- // If we generated new code for the base pointer, clean up.
- RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
- RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
- return false;
- }
+ Value *Opnd0 = PopCntZext;
+ Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
+ if (PreCond->getOperand(0) != Var)
+ std::swap(Opnd0, Opnd1);
- // Okay, everything is safe, we can transform this!
+ ICmpInst *NewPreCond = cast<ICmpInst>(
+ Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
+ PreCondBr->setCondition(NewPreCond);
- // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
- // pointer size if it isn't already.
- Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
- BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
+ RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
+ }
- const SCEV *NumBytesS =
- SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1), SCEV::FlagNUW);
- if (StoreSize != 1)
- NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
- SCEV::FlagNUW);
+ // Step 3: Note that the population count is exactly the trip count of the
+ // loop in question, which enables us to convert the loop from a noncountable
+ // loop into a countable one. The benefit is twofold:
+ //
+ // - If the loop only counts the population, the entire loop becomes dead
+ //   after the transformation. It is much easier to prove a countable loop
+ //   dead than to prove a noncountable one. (In some C dialects, an infinite
+ //   loop isn't dead even if it computes nothing useful. In general, DCE needs
+ //   to prove a noncountable loop finite before it can safely delete it.)
+ //
+ // - If the loop also performs something else, it remains alive.
+ // Since it is transformed to countable form, it can be aggressively
+ // optimized by some optimizations which are in general not applicable
+ // to a noncountable loop.
+ //
+ // After this step, this loop (conceptually) would look like the following:
+ // newcnt = __builtin_ctpop(x);
+ // t = newcnt;
+ // if (x)
+ // do { cnt++; x &= x-1; t--; } while (t > 0);
+ BasicBlock *Body = *(CurLoop->block_begin());
+ {
+ auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
+ ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
+ Type *Ty = TripCnt->getType();
- Value *NumBytes =
- Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
+ PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", Body->begin());
- CallInst *NewCall =
- Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
- std::min(SI->getAlignment(), LI->getAlignment()));
- NewCall->setDebugLoc(SI->getDebugLoc());
+ Builder.SetInsertPoint(LbCond);
+ Value *Opnd1 = cast<Value>(TcPhi);
+ Value *Opnd2 = cast<Value>(ConstantInt::get(Ty, 1));
+ Instruction *TcDec = cast<Instruction>(
+ Builder.CreateSub(Opnd1, Opnd2, "tcdec", false, true));
- DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
- << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
- << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
+ TcPhi->addIncoming(TripCnt, PreHead);
+ TcPhi->addIncoming(TcDec, Body);
- // Okay, the memset has been formed. Zap the original store and anything that
- // feeds into it.
- deleteDeadInstruction(SI, TLI);
- ++NumMemCpy;
- return true;
-}
+ CmpInst::Predicate Pred =
+ (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
+ LbCond->setPredicate(Pred);
+ LbCond->setOperand(0, TcDec);
+ LbCond->setOperand(1, cast<Value>(ConstantInt::get(Ty, 0)));
+ }
-bool LoopIdiomRecognize::runOnNoncountableLoop() {
- NclPopcountRecognize Popcount(*this);
- if (Popcount.recognize())
- return true;
+ // Step 4: All the references to the original population counter outside
+ // the loop are replaced with the NewCount -- the value returned from
+ // __builtin_ctpop().
+ CntInst->replaceUsesOutsideBlock(NewCount, Body);
- return false;
+ // Step 5: Forget the "non-computable" trip-count SCEV associated with the
+ // loop. The loop would otherwise not be deleted even if it becomes empty.
+ SE->forgetLoop(CurLoop);
}