1 //===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This file provides internal interfaces used to implement the InstCombine.
13 //===----------------------------------------------------------------------===//
15 #ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
16 #define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/TargetFolder.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/Dominators.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/InstVisitor.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/Operator.h"
28 #include "llvm/IR/PatternMatch.h"
29 #include "llvm/Pass.h"
30 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
32 #define DEBUG_TYPE "instcombine"
38 class TargetLibraryInfo;
43 /// \brief Assign a complexity or rank value to LLVM Values.
/// This routine maps IR values to various complexity ranks:
///   0 -> undef
///   1 -> Constants
///   2 -> Other non-instructions
50 /// 3 -> Unary operations
51 /// 4 -> Other instructions
52 static inline unsigned getComplexity(Value *V) {
53 if (isa<Instruction>(V)) {
54 if (BinaryOperator::isNeg(V) || BinaryOperator::isFNeg(V) ||
55 BinaryOperator::isNot(V))
61 return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
64 /// \brief Add one to a Constant
65 static inline Constant *AddOne(Constant *C) {
66 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
68 /// \brief Subtract one from a Constant
69 static inline Constant *SubOne(Constant *C) {
70 return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
73 /// \brief Return true if the specified value is free to invert (apply ~ to).
74 /// This happens in cases where the ~ can be eliminated. If WillInvertAllUses
75 /// is true, work under the assumption that the caller intends to remove all
76 /// uses of V and only keep uses of ~V.
78 static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
80 if (BinaryOperator::isNot(V))
83 // Constants can be considered to be not'ed values.
84 if (isa<ConstantInt>(V))
87 // Compares can be inverted if all of their uses are being modified to use the
90 return WillInvertAllUses;
92 // If `V` is of the form `A + Constant` then `-1 - V` can be folded into `(-1
93 // - Constant) - A` if we are willing to invert all of the uses.
94 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
95 if (BO->getOpcode() == Instruction::Add ||
96 BO->getOpcode() == Instruction::Sub)
97 if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))
98 return WillInvertAllUses;
/// \brief Specific patterns of overflow check idioms that we match.
enum OverflowCheckFlavor {
  OCF_UNSIGNED_ADD,
  OCF_SIGNED_ADD,
  OCF_UNSIGNED_SUB,
  OCF_SIGNED_SUB,
  OCF_UNSIGNED_MUL,
  OCF_SIGNED_MUL,

  OCF_INVALID ///< Sentinel: not a recognized overflow-check idiom.
};
116 /// \brief Returns the OverflowCheckFlavor corresponding to a overflow_with_op
118 static inline OverflowCheckFlavor
119 IntrinsicIDToOverflowCheckFlavor(unsigned ID) {
123 case Intrinsic::uadd_with_overflow:
124 return OCF_UNSIGNED_ADD;
125 case Intrinsic::sadd_with_overflow:
126 return OCF_SIGNED_ADD;
127 case Intrinsic::usub_with_overflow:
128 return OCF_UNSIGNED_SUB;
129 case Intrinsic::ssub_with_overflow:
130 return OCF_SIGNED_SUB;
131 case Intrinsic::umul_with_overflow:
132 return OCF_UNSIGNED_MUL;
133 case Intrinsic::smul_with_overflow:
134 return OCF_SIGNED_MUL;
138 /// \brief An IRBuilder inserter that adds new instructions to the instcombine
140 class LLVM_LIBRARY_VISIBILITY InstCombineIRInserter
141 : public IRBuilderDefaultInserter<true> {
142 InstCombineWorklist &Worklist;
146 InstCombineIRInserter(InstCombineWorklist &WL, AssumptionCache *AC)
147 : Worklist(WL), AC(AC) {}
149 void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
150 BasicBlock::iterator InsertPt) const {
151 IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
154 using namespace llvm::PatternMatch;
155 if (match(I, m_Intrinsic<Intrinsic::assume>()))
156 AC->registerAssumption(cast<CallInst>(I));
/// \brief The core instruction combiner logic.
///
/// This class provides both the logic to recursively visit instructions and
/// combine them, as well as the pass infrastructure for running this as part
/// of the LLVM pass pipeline.
class LLVM_LIBRARY_VISIBILITY InstCombiner
    : public InstVisitor<InstCombiner, Instruction *> {
  // FIXME: These members shouldn't be public.
  /// \brief A worklist of the instructions that need to be simplified.
  InstCombineWorklist &Worklist;

  /// \brief An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  typedef IRBuilder<true, TargetFolder, InstCombineIRInserter> BuilderTy;
  // NOTE(review): the `BuilderTy *Builder;` member the constructor below
  // initializes is not visible in this excerpt — confirm against the full
  // header.

  // Mode in which we are running the combiner.
  const bool MinimizeSize;

  // Required analyses.
  // FIXME: These can never be null and should be references.
  TargetLibraryInfo *TLI;
  const DataLayout &DL;

  // Optional analyses. When non-null, these can both be used to do better
  // combining and will be updated to reflect any changes.
  // NOTE(review): the AA/AC/DT/LI members referenced by the constructor's
  // initializer list (and the MadeIRChange flag) are not visible in this
  // excerpt — confirm against the full header.

  /// Construct a combiner over \p Worklist; MadeIRChange starts out false.
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
               bool MinimizeSize, AliasAnalysis *AA,
               AssumptionCache *AC, TargetLibraryInfo *TLI,
               DominatorTree *DT, const DataLayout &DL, LoopInfo *LI)
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
        AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL), LI(LI), MadeIRChange(false) {}

  /// \brief Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  // NOTE(review): the declaration this comment documents (`bool run();`
  // upstream) appears to be missing from this excerpt.

  // Simple accessors for the analyses this combiner was constructed with.
  AssumptionCache *getAssumptionCache() const { return AC; }

  const DataLayout &getDataLayout() const { return DL; }

  DominatorTree *getDominatorTree() const { return DT; }

  LoopInfo *getLoopInfo() const { return LI; }

  TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
  // NOTE(review): several declaration continuation lines appear to have been
  // dropped from this excerpt (FoldOrWithConstants, FoldXorWithConstants,
  // FoldFCmp_IntToFP_Cst, visitICmpInstWithInstAndIntCst, FoldShiftByConstant
  // and ShouldOptimizeCast end mid-parameter-list below) — confirm against
  // the full header.

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //
  //   null        - No change was made
  //   I           - Change was made, I is still valid, I may be dead though
  //   otherwise   - Change was made, replace I with returned instruction
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Value *foldFMulConst(Instruction *FMulOrDiv, Constant *C,
                       Instruction *InsertBefore);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool SimplifyDivRemOfSelect(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Value *FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS);
  Value *FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *visitAnd(BinaryOperator &I);
  Value *FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction *CxtI);
  Value *FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
  Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op, Value *A,
  Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op, Value *A,
  Instruction *visitOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
  Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
  Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI, Instruction *LHS,
  Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
                                 ConstantInt *CI1, ConstantInt *CI2);
  Instruction *FoldICmpAddOpCst(Instruction &ICI, Value *X, ConstantInt *CI,
                                ICmpInst::Predicate Pred);
  Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *FoldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca, Value *Other);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI);
  Instruction *FoldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *FoldItoFPtoI(Instruction &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);

  // visitInstruction - Specify what to return for unhandled instructions...
  Instruction *visitInstruction(Instruction &I) { return nullptr; }

  // True when DB dominates all uses of DI except UI.
  // UI must be in the same block as DI.
  // The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  // Replace select with select operand SIOpd in SI-ICmp sequence when possible
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  bool ShouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool ShouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Value *dyn_castFNegVal(Value *V, bool NoSignedZero = false) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// \brief Classify whether a cast is worth optimizing.
  ///
  /// Returns true if the cast from "V to Ty" actually results in any code
  /// being generated and is interesting to optimize out. If the cast can be
  /// eliminated by some other simple transformation, we prefer to do the
  /// simplification first.
  bool ShouldOptimizeCast(Instruction::CastOps opcode, const Value *V,

  /// \brief Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and result of the overflow check in
  /// OverflowResult, and return true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, Value *RHS,
                             Instruction &CtxI, Value *&OperationResult,
                             Constant *&OverflowResult);

  Instruction *visitCallSite(CallSite CS);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallSite CS);
  Instruction *transformCallThroughTrampoline(CallSite CS,
                                              IntrinsicInst *Tramp);
  Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                 bool DoXform = true);
  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
  bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction &CxtI);
  bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction &CxtI);
  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
  /// \brief Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    // NOTE(review): the tail of this method (queueing New on the worklist,
    // `return New;`, and the closing brace) appears to be missing from this
    // excerpt — confirm against the full header.

  /// \brief Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
    // NOTE(review): closing brace appears to be missing here.

  /// \brief A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    // NOTE(review): the guard this comment describes (`if (&I == V)`) appears
    // to be missing from this excerpt; as written, V is replaced by undef
    // unconditionally — confirm against the full header.
    V = UndefValue::get(I.getType());

    DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                 << " with " << *V << '\n');

    I.replaceAllUsesWith(V);
    // NOTE(review): `return &I;` and the closing brace appear to be missing.

  /// Creates a result tuple for an overflow intrinsic \p II with a given
  /// \p Result and a constant \p Overflow value.
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
                                   Constant *Overflow) {
    // Build {undef, Overflow}, then insert the real Result into slot 0.
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
    StructType *ST = cast<StructType>(II->getType());
    Constant *Struct = ConstantStruct::get(ST, V);
    return InsertValueInst::Create(Struct, Result, 0);
    // NOTE(review): closing brace appears to be missing here.

  /// \brief Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  Instruction *EraseInstFromFunction(Instruction &I) {
    DEBUG(dbgs() << "IC: ERASE " << I << '\n');

    assert(I.use_empty() && "Cannot erase instruction that is used!");
    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    if (I.getNumOperands() < 8) {
      for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
        if (Instruction *Op = dyn_cast<Instruction>(*i))
          // NOTE(review): the loop body (re-queueing Op on the worklist) and
          // the actual erase/bookkeeping statements appear to be missing from
          // this excerpt — confirm against the full header.
    return nullptr; // Don't do anything with FI

  // Thin wrappers forwarding to the llvm::ValueTracking helpers with this
  // combiner's DataLayout / analyses supplied.
  void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                        unsigned Depth, Instruction *CxtI) const {
    return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
    // NOTE(review): trailing argument(s) and closing brace appear missing.

  bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0,
                         Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, AC, CxtI, DT);

  unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0,
                              Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, AC, CxtI, DT);

  void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                      unsigned Depth = 0, Instruction *CxtI = nullptr) const {
    return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,

  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
                                               const Instruction *CxtI) {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AC, CxtI, DT);

  OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
                                               const Instruction *CxtI) {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT);
  // NOTE(review): a few declaration continuation lines appear to have been
  // dropped from this excerpt (SimplifyDemandedUseBits,
  // SimplifyShrShlDemandedBits and InsertRangeTest end mid-parameter-list
  // below) — confirm against the full header.

  /// \brief Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// \brief Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this by either by factorizing out common terms (eg "(A*B)+(A*C)"
  /// -> "A*(B+C)") or expanding out if this results in simplifications (eg: "A
  /// & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the simplified
  /// value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);

  /// \brief Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask, APInt &KnownZero,
                                 APInt &KnownOne, unsigned Depth,
  bool SimplifyDemandedBits(Use &U, APInt DemandedMask, APInt &KnownZero,
                            APInt &KnownOne, unsigned Depth = 0);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
  /// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
  Value *SimplifyShrShlDemandedBits(Instruction *Lsr, Instruction *Sftl,
                                    APInt DemandedMask, APInt &KnownZero,

  /// \brief Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0);

  Value *SimplifyVectorOp(BinaryOperator &Inst);
  Value *SimplifyBSwap(BinaryOperator &Inst);

  // FoldOpIntoPhi - Given a binary operator, cast instruction, or select
  // which has a PHI node as operand #0, see if we can fold the instruction
  // into the PHI (which is only possible if all operands to the PHI are
  // constants).
  Instruction *FoldOpIntoPhi(Instruction &I);

  /// \brief Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);

  Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                            bool isSub, Instruction &I);
  Value *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi, bool isSigned,
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  Instruction *MatchBSwapOrBitReverse(BinaryOperator &I);
  bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
  Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
  Instruction *SimplifyMemSet(MemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// \brief Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
572 } // end namespace llvm.