1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // InstructionCombining - Combine instructions to form fewer, simpler
11 // instructions. This pass does not modify the CFG. This pass is where
12 // algebraic simplification happens.
14 // This pass combines things like:
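//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2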
20 // This is a simple worklist driven algorithm.
22 // This pass guarantees that the following canonicalizations are performed on the program:
24 // 1. If a binary operator has a constant operand, it is moved to the RHS
25 // 2. Bitwise operators with constant operands are always grouped so that
26 // shifts are performed first, then or's, then and's, then xor's.
27 // 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
28 // 4. All cmp instructions on boolean values are replaced with logical ops
29 // 5. add X, X is represented as (X*2) => (X << 1)
30 // 6. Multiplies with a power-of-two constant argument are transformed into shifts.
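//
// For example (illustrative of rules 1 and 6): "mul i32 8, %X" is canonicalized
// to "shl i32 %X, 3", and "add i32 5, %X" becomes "add i32 %X, 5".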
34 //===----------------------------------------------------------------------===//
36 #define DEBUG_TYPE "instcombine"
37 #include "llvm/Transforms/Scalar.h"
38 #include "llvm/IntrinsicInst.h"
39 #include "llvm/LLVMContext.h"
40 #include "llvm/Pass.h"
41 #include "llvm/DerivedTypes.h"
42 #include "llvm/GlobalVariable.h"
43 #include "llvm/Operator.h"
44 #include "llvm/Analysis/ConstantFolding.h"
45 #include "llvm/Analysis/InstructionSimplify.h"
46 #include "llvm/Analysis/MemoryBuiltins.h"
47 #include "llvm/Analysis/ValueTracking.h"
48 #include "llvm/Target/TargetData.h"
49 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
50 #include "llvm/Transforms/Utils/Local.h"
51 #include "llvm/Support/CallSite.h"
52 #include "llvm/Support/ConstantRange.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/GetElementPtrTypeIterator.h"
56 #include "llvm/Support/InstVisitor.h"
57 #include "llvm/Support/IRBuilder.h"
58 #include "llvm/Support/MathExtras.h"
59 #include "llvm/Support/PatternMatch.h"
60 #include "llvm/Support/TargetFolder.h"
61 #include "llvm/Support/raw_ostream.h"
62 #include "llvm/ADT/DenseMap.h"
63 #include "llvm/ADT/SmallVector.h"
64 #include "llvm/ADT/SmallPtrSet.h"
65 #include "llvm/ADT/Statistic.h"
66 #include "llvm/ADT/STLExtras.h"
70 using namespace llvm::PatternMatch;
72 STATISTIC(NumCombined , "Number of insts combined");
73 STATISTIC(NumConstProp, "Number of constant folds");
74 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
75 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
76 STATISTIC(NumSunkInst , "Number of instructions sunk");
79 /// InstCombineWorklist - This is the worklist management logic for InstCombine.
81 class InstCombineWorklist {
82 SmallVector<Instruction*, 256> Worklist;
83 DenseMap<Instruction*, unsigned> WorklistMap;
85 void operator=(const InstCombineWorklist&RHS); // DO NOT IMPLEMENT
86 InstCombineWorklist(const InstCombineWorklist&); // DO NOT IMPLEMENT
88 InstCombineWorklist() {}
90 bool isEmpty() const { return Worklist.empty(); }
92 /// Add - Add the specified instruction to the worklist if it isn't already in it.
94 void Add(Instruction *I) {
95 if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
96 DEBUG(errs() << "IC: ADD: " << *I << '\n');
97 Worklist.push_back(I);
101 void AddValue(Value *V) {
102 if (Instruction *I = dyn_cast<Instruction>(V))
106 /// AddInitialGroup - Add the specified batch of instructions in reverse order,
107 /// which should only be done when the worklist is empty and when the group
108 /// has no duplicates.
109 void AddInitialGroup(Instruction *const *List, unsigned NumEntries) {
110 assert(Worklist.empty() && "Worklist must be empty to add initial group");
111 Worklist.reserve(NumEntries+16);
112 DEBUG(errs() << "IC: ADDING: " << NumEntries << " instrs to worklist\n");
113 for (; NumEntries; --NumEntries) {
114 Instruction *I = List[NumEntries-1];
115 WorklistMap.insert(std::make_pair(I, Worklist.size()));
116 Worklist.push_back(I);
120 // Remove - remove I from the worklist if it exists.
121 void Remove(Instruction *I) {
122 DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
123 if (It == WorklistMap.end()) return; // Not in worklist.
125 // Don't bother moving everything down, just null out the slot.
126 Worklist[It->second] = 0;
128 WorklistMap.erase(It);
131 Instruction *RemoveOne() {
132 Instruction *I = Worklist.back();
134 WorklistMap.erase(I);
138 /// AddUsersToWorkList - When an instruction is simplified, add all users of
139 /// the instruction to the worklist because they might get more simplified now.
142 void AddUsersToWorkList(Instruction &I) {
143 for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
145 Add(cast<Instruction>(*UI));
149 /// Zap - check that the worklist is empty and nuke the backing store for
150 /// the map if it is large.
152 assert(WorklistMap.empty() && "Worklist empty, but map not?");
154 // Do an explicit clear, this shrinks the map if needed.
158 } // end anonymous namespace.
162 /// InstCombineIRInserter - This is an IRBuilder insertion helper that works
163 /// just like the normal insertion helper, but also adds any new instructions
164 /// to the instcombine worklist.
165 class InstCombineIRInserter : public IRBuilderDefaultInserter<true> {
166 InstCombineWorklist &Worklist;
168 InstCombineIRInserter(InstCombineWorklist &WL) : Worklist(WL) {}
170 void InsertHelper(Instruction *I, const Twine &Name,
171 BasicBlock *BB, BasicBlock::iterator InsertPt) const {
172 IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
176 } // end anonymous namespace
180 class InstCombiner : public FunctionPass,
181 public InstVisitor<InstCombiner, Instruction*> {
183 bool MustPreserveLCSSA;
186 /// Worklist - All of the instructions that need to be simplified.
187 InstCombineWorklist Worklist;
189 /// Builder - This is an IRBuilder that automatically inserts new
190 /// instructions into the worklist when they are created.
191 typedef IRBuilder<true, TargetFolder, InstCombineIRInserter> BuilderTy;
194 static char ID; // Pass identification, replacement for typeid
195 InstCombiner() : FunctionPass(&ID), TD(0), Builder(0) {}
197 LLVMContext *Context;
198 LLVMContext *getContext() const { return Context; }
201 virtual bool runOnFunction(Function &F);
203 bool DoOneIteration(Function &F, unsigned ItNum);
205 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
206 AU.addPreservedID(LCSSAID);
207 AU.setPreservesCFG();
210 TargetData *getTargetData() const { return TD; }
212 // Visitation implementation - Implement instruction combining for different
213 // instruction types. The semantics are as follows:
215 // null - No change was made
216 // I - Change was made, I is still valid, I may be dead though
217 // otherwise - Change was made, replace I with returned instruction
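//
// For example (illustrative): a visit method that builds a replacement
// instruction returns it so the driver substitutes it for I; one that only
// tweaks I's operands in place returns &I; one that finds nothing to do
// returns null (0).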
219 Instruction *visitAdd(BinaryOperator &I);
220 Instruction *visitFAdd(BinaryOperator &I);
221 Value *OptimizePointerDifference(Value *LHS, Value *RHS, const Type *Ty);
222 Instruction *visitSub(BinaryOperator &I);
223 Instruction *visitFSub(BinaryOperator &I);
224 Instruction *visitMul(BinaryOperator &I);
225 Instruction *visitFMul(BinaryOperator &I);
226 Instruction *visitURem(BinaryOperator &I);
227 Instruction *visitSRem(BinaryOperator &I);
228 Instruction *visitFRem(BinaryOperator &I);
229 bool SimplifyDivRemOfSelect(BinaryOperator &I);
230 Instruction *commonRemTransforms(BinaryOperator &I);
231 Instruction *commonIRemTransforms(BinaryOperator &I);
232 Instruction *commonDivTransforms(BinaryOperator &I);
233 Instruction *commonIDivTransforms(BinaryOperator &I);
234 Instruction *visitUDiv(BinaryOperator &I);
235 Instruction *visitSDiv(BinaryOperator &I);
236 Instruction *visitFDiv(BinaryOperator &I);
237 Instruction *FoldAndOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
238 Instruction *FoldAndOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
239 Instruction *visitAnd(BinaryOperator &I);
240 Instruction *FoldOrOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
241 Instruction *FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
242 Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
243 Value *A, Value *B, Value *C);
244 Instruction *visitOr (BinaryOperator &I);
245 Instruction *visitXor(BinaryOperator &I);
246 Instruction *visitShl(BinaryOperator &I);
247 Instruction *visitAShr(BinaryOperator &I);
248 Instruction *visitLShr(BinaryOperator &I);
249 Instruction *commonShiftTransforms(BinaryOperator &I);
250 Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
252 Instruction *visitFCmpInst(FCmpInst &I);
253 Instruction *visitICmpInst(ICmpInst &I);
254 Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
255 Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
258 Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
259 ConstantInt *DivRHS);
261 Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
262 ICmpInst::Predicate Cond, Instruction &I);
263 Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
265 Instruction *commonCastTransforms(CastInst &CI);
266 Instruction *commonIntCastTransforms(CastInst &CI);
267 Instruction *commonPointerCastTransforms(CastInst &CI);
268 Instruction *visitTrunc(TruncInst &CI);
269 Instruction *visitZExt(ZExtInst &CI);
270 Instruction *visitSExt(SExtInst &CI);
271 Instruction *visitFPTrunc(FPTruncInst &CI);
272 Instruction *visitFPExt(CastInst &CI);
273 Instruction *visitFPToUI(FPToUIInst &FI);
274 Instruction *visitFPToSI(FPToSIInst &FI);
275 Instruction *visitUIToFP(CastInst &CI);
276 Instruction *visitSIToFP(CastInst &CI);
277 Instruction *visitPtrToInt(PtrToIntInst &CI);
278 Instruction *visitIntToPtr(IntToPtrInst &CI);
279 Instruction *visitBitCast(BitCastInst &CI);
280 Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
282 Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
283 Instruction *visitSelectInst(SelectInst &SI);
284 Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
285 Instruction *visitCallInst(CallInst &CI);
286 Instruction *visitInvokeInst(InvokeInst &II);
288 Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
289 Instruction *visitPHINode(PHINode &PN);
290 Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
291 Instruction *visitAllocaInst(AllocaInst &AI);
292 Instruction *visitFree(Instruction &FI);
293 Instruction *visitLoadInst(LoadInst &LI);
294 Instruction *visitStoreInst(StoreInst &SI);
295 Instruction *visitBranchInst(BranchInst &BI);
296 Instruction *visitSwitchInst(SwitchInst &SI);
297 Instruction *visitInsertElementInst(InsertElementInst &IE);
298 Instruction *visitExtractElementInst(ExtractElementInst &EI);
299 Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
300 Instruction *visitExtractValueInst(ExtractValueInst &EV);
302 // visitInstruction - Specify what to return for unhandled instructions...
303 Instruction *visitInstruction(Instruction &I) { return 0; }
306 Instruction *visitCallSite(CallSite CS);
307 bool transformConstExprCastCall(CallSite CS);
308 Instruction *transformCallThroughTrampoline(CallSite CS);
309 Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
310 bool DoXform = true);
311 bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
312 DbgDeclareInst *hasOneUsePlusDeclare(Value *V);
316 // InsertNewInstBefore - insert an instruction New before instruction Old
317 // in the program. Add the new instruction to the worklist.
319 Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
320 assert(New && New->getParent() == 0 &&
321 "New instruction already inserted into a basic block!");
322 BasicBlock *BB = Old.getParent();
323 BB->getInstList().insert(&Old, New); // Insert inst
328 // ReplaceInstUsesWith - This method is to be used when an instruction is
329 // found to be dead, replaceable with another preexisting expression. Here
330 // we add all uses of I to the worklist, replace all uses of I with the new
331 // value, then return I, so that the inst combiner will know that I was modified.
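// Illustrative usage (a sketch, not verbatim from any particular visit
// method): after proving I always computes zero, a visit method could do:
//   return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));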
334 Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
335 Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.
337 // If we are replacing the instruction with itself, this must be in a
338 // segment of unreachable code, so just clobber the instruction.
340 V = UndefValue::get(I.getType());
342 I.replaceAllUsesWith(V);
346 // EraseInstFromFunction - When dealing with an instruction that has side
347 // effects or produces a void value, we can't rely on DCE to delete the
348 // instruction. Instead, visit methods should return the value returned by this function.
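// Illustrative usage (a sketch): after proving a store SI is dead, a visit
// method would finish with:
//   return EraseInstFromFunction(SI);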
350 Instruction *EraseInstFromFunction(Instruction &I) {
351 DEBUG(errs() << "IC: ERASE " << I << '\n');
353 assert(I.use_empty() && "Cannot erase instruction that is used!");
354 // Make sure that we reprocess all operands now that we reduced their use counts.
356 if (I.getNumOperands() < 8) {
357 for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
358 if (Instruction *Op = dyn_cast<Instruction>(*i))
364 return 0; // Nothing to replace I with; it has already been erased.
367 void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
368 APInt &KnownOne, unsigned Depth = 0) const {
369 return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
372 bool MaskedValueIsZero(Value *V, const APInt &Mask,
373 unsigned Depth = 0) const {
374 return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
376 unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
377 return llvm::ComputeNumSignBits(Op, TD, Depth);
382 /// SimplifyCommutative - This performs a few simplifications for
383 /// commutative operators.
384 bool SimplifyCommutative(BinaryOperator &I);
386 /// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
387 /// based on the demanded bits.
388 Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
389 APInt& KnownZero, APInt& KnownOne,
391 bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
392 APInt& KnownZero, APInt& KnownOne,
395 /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
396 /// SimplifyDemandedBits knows about. See if the instruction has any
397 /// properties that allow us to simplify its operands.
398 bool SimplifyDemandedInstructionBits(Instruction &Inst);
400 Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
401 APInt& UndefElts, unsigned Depth = 0);
403 // FoldOpIntoPhi - Given a binary operator, cast instruction, or select
404 // which has a PHI node as operand #0, see if we can fold the instruction
405 // into the PHI (which is only possible if all operands to the PHI are constants).
408 // If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
409 // that would normally be unprofitable because they strongly encourage jump threading.
411 Instruction *FoldOpIntoPhi(Instruction &I, bool AllowAggressive = false);
413 // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
414 // operator and they all are only used by the PHI, PHI together their
415 // inputs, and do the operation once, to the result of the PHI.
416 Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
417 Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
418 Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
419 Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
422 Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
423 ConstantInt *AndRHS, BinaryOperator &TheAnd);
425 Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
426 bool isSub, Instruction &I);
427 Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
428 bool isSigned, bool Inside, Instruction &IB);
429 Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
430 Instruction *MatchBSwap(BinaryOperator &I);
431 bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
432 Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
433 Instruction *SimplifyMemSet(MemSetInst *MI);
436 Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
438 bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
439 unsigned CastOpc, int &NumCastsRemoved);
440 unsigned GetOrEnforceKnownAlignment(Value *V,
441 unsigned PrefAlign = 0);
444 } // end anonymous namespace
446 char InstCombiner::ID = 0;
447 static RegisterPass<InstCombiner>
448 X("instcombine", "Combine redundant instructions");
450 // getComplexity: Assign a complexity or rank value to LLVM Values...
451 // 0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
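// For example (illustrative): an UndefValue ranks 0, a ConstantInt ranks 1,
// an Argument ranks 3, and an ordinary Instruction ranks 4, so
// SimplifyCommutative orders "add i32 1, %x" as "add i32 %x, 1".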
452 static unsigned getComplexity(Value *V) {
453 if (isa<Instruction>(V)) {
454 if (BinaryOperator::isNeg(V) ||
455 BinaryOperator::isFNeg(V) ||
456 BinaryOperator::isNot(V))
460 if (isa<Argument>(V)) return 3;
461 return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
464 // isOnlyUse - Return true if this instruction will be deleted if we stop using it.
466 static bool isOnlyUse(Value *V) {
467 return V->hasOneUse() || isa<Constant>(V);
470 // getPromotedType - Return the specified type promoted as it would be to pass
471 // through a va_arg area.
472 static const Type *getPromotedType(const Type *Ty) {
473 if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
474 if (ITy->getBitWidth() < 32)
475 return Type::getInt32Ty(Ty->getContext());
480 /// ShouldChangeType - Return true if it is desirable to convert a computation
481 /// from 'From' to 'To'. We don't want to convert from a legal to an illegal
482 /// type for example, or from a smaller to a larger illegal type.
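/// For example (illustrative, for a target whose only legal integer types are
/// i32 and i64): i32 -> i17 and i64 -> i160 are rejected, while i17 -> i32 and
/// i160 -> i64 are allowed.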
483 static bool ShouldChangeType(const Type *From, const Type *To,
484 const TargetData *TD) {
485 assert(isa<IntegerType>(From) && isa<IntegerType>(To));
487 // If we don't have TD, we don't know if the source/dest are legal.
488 if (!TD) return false;
490 unsigned FromWidth = From->getPrimitiveSizeInBits();
491 unsigned ToWidth = To->getPrimitiveSizeInBits();
492 bool FromLegal = TD->isLegalInteger(FromWidth);
493 bool ToLegal = TD->isLegalInteger(ToWidth);
495 // If this is a legal integer from type, and the result would be an illegal
496 // type, don't do the transformation.
497 if (FromLegal && !ToLegal)
500 // Otherwise, if both are illegal, do not increase the size of the result. We
501 // do allow things like i160 -> i64, but not i64 -> i160.
502 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
508 /// getBitCastOperand - If the specified operand is a CastInst, a constant
509 /// expression bitcast, or a GetElementPtrInst with all zero indices, return the
510 // operand value; otherwise return null.
511 static Value *getBitCastOperand(Value *V) {
512 if (Operator *O = dyn_cast<Operator>(V)) {
513 if (O->getOpcode() == Instruction::BitCast)
514 return O->getOperand(0);
515 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
516 if (GEP->hasAllZeroIndices())
517 return GEP->getPointerOperand();
522 /// This function is a wrapper around CastInst::isEliminableCastPair. It
523 /// simply extracts arguments and returns what that function returns.
524 static Instruction::CastOps
525 isEliminableCastPair(
526 const CastInst *CI, ///< The first cast instruction
527 unsigned opcode, ///< The opcode of the second cast instruction
528 const Type *DstTy, ///< The target type for the second cast instruction
529 TargetData *TD ///< The target data for pointer size
532 const Type *SrcTy = CI->getOperand(0)->getType(); // source type of the first cast
533 const Type *MidTy = CI->getType(); // result type of the first cast
535 // Get the opcodes of the two Cast instructions
536 Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
537 Instruction::CastOps secondOp = Instruction::CastOps(opcode);
539 unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
541 TD ? TD->getIntPtrType(CI->getContext()) : 0);
543 // We don't want to form an inttoptr or ptrtoint that converts to an integer
544 // type that differs from the pointer size.
545 if ((Res == Instruction::IntToPtr &&
546 (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
547 (Res == Instruction::PtrToInt &&
548 (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
551 return Instruction::CastOps(Res);
554 /// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
555 /// in any code being generated. It does not require codegen if V is simple
556 /// enough or if the cast can be folded into other casts.
557 static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
558 const Type *Ty, TargetData *TD) {
559 if (V->getType() == Ty || isa<Constant>(V)) return false;
561 // If this is another cast that can be eliminated, it isn't codegen either.
562 if (const CastInst *CI = dyn_cast<CastInst>(V))
563 if (isEliminableCastPair(CI, opcode, Ty, TD))
568 // SimplifyCommutative - This performs a few simplifications for commutative operators:
571 // 1. Order operands such that they are listed from right (least complex) to
572 // left (most complex). This puts constants before unary operators before binary operators.
575 // 2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
576 // 3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
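//
// For example (illustrative): rule 2 rewrites (add (add %X, 1), 2) into
// (add %X, 3), and rule 3 rewrites (add (add %A, 1), (add %B, 2)) into
// (add (add %A, %B), 3).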
578 bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
579 bool Changed = false;
580 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
581 Changed = !I.swapOperands();
583 if (!I.isAssociative()) return Changed;
584 Instruction::BinaryOps Opcode = I.getOpcode();
585 if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
586 if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
587 if (isa<Constant>(I.getOperand(1))) {
588 Constant *Folded = ConstantExpr::get(I.getOpcode(),
589 cast<Constant>(I.getOperand(1)),
590 cast<Constant>(Op->getOperand(1)));
591 I.setOperand(0, Op->getOperand(0));
592 I.setOperand(1, Folded);
594 } else if (BinaryOperator *Op1=dyn_cast<BinaryOperator>(I.getOperand(1)))
595 if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
596 isOnlyUse(Op) && isOnlyUse(Op1)) {
597 Constant *C1 = cast<Constant>(Op->getOperand(1));
598 Constant *C2 = cast<Constant>(Op1->getOperand(1));
600 // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
601 Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
602 Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
606 I.setOperand(0, New);
607 I.setOperand(1, Folded);
614 // dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
615 // if the LHS is a constant zero (which is the 'negate' form).
617 static inline Value *dyn_castNegVal(Value *V) {
618 if (BinaryOperator::isNeg(V))
619 return BinaryOperator::getNegArgument(V);
621 // Constants can be considered to be negated values if they can be folded.
622 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
623 return ConstantExpr::getNeg(C);
625 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
626 if (C->getType()->getElementType()->isInteger())
627 return ConstantExpr::getNeg(C);
632 // dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
633 // instruction if the LHS is a constant negative zero (which is the 'negate' form).
636 static inline Value *dyn_castFNegVal(Value *V) {
637 if (BinaryOperator::isFNeg(V))
638 return BinaryOperator::getFNegArgument(V);
640 // Constants can be considered to be negated values if they can be folded.
641 if (ConstantFP *C = dyn_cast<ConstantFP>(V))
642 return ConstantExpr::getFNeg(C);
644 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
645 if (C->getType()->getElementType()->isFloatingPoint())
646 return ConstantExpr::getFNeg(C);
651 /// isFreeToInvert - Return true if the specified value is free to invert (apply
652 /// ~ to). This happens in cases where the ~ can be eliminated.
653 static inline bool isFreeToInvert(Value *V) {
655 if (BinaryOperator::isNot(V))
658 // Constants can be considered to be not'ed values.
659 if (isa<ConstantInt>(V))
662 // Compares can be inverted if they have a single use.
663 if (CmpInst *CI = dyn_cast<CmpInst>(V))
664 return CI->hasOneUse();
669 static inline Value *dyn_castNotVal(Value *V) {
670 // If this is not(not(x)) don't return that this is a not: we want the two
671 // not's to be folded first.
672 if (BinaryOperator::isNot(V)) {
673 Value *Operand = BinaryOperator::getNotArgument(V);
674 if (!isFreeToInvert(Operand))
678 // Constants can be considered to be not'ed values...
679 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
680 return ConstantInt::get(C->getType(), ~C->getValue());
686 // dyn_castFoldableMul - If this value is a multiply that can be folded into
687 // other computations (because it has a constant operand), return the
688 // non-constant operand of the multiply, and set CST to point to the multiplier.
689 // Otherwise, return null.
691 static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
692 if (V->hasOneUse() && V->getType()->isInteger())
693 if (Instruction *I = dyn_cast<Instruction>(V)) {
694 if (I->getOpcode() == Instruction::Mul)
695 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
696 return I->getOperand(0);
697 if (I->getOpcode() == Instruction::Shl)
698 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
699 // The multiplier is really 1 << CST.
700 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
701 uint32_t CSTVal = CST->getLimitedValue(BitWidth);
702 CST = ConstantInt::get(V->getType()->getContext(),
703 APInt(BitWidth, 1).shl(CSTVal));
704 return I->getOperand(0);
710 /// AddOne - Add one to a Constant.
711 static Constant *AddOne(Constant *C) {
712 return ConstantExpr::getAdd(C,
713 ConstantInt::get(C->getType(), 1));
715 /// SubOne - Subtract one from a ConstantInt
716 static Constant *SubOne(ConstantInt *C) {
717 return ConstantExpr::getSub(C,
718 ConstantInt::get(C->getType(), 1));
720 /// MultiplyOverflows - True if the multiply cannot be expressed in an int of this size.
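/// For example (illustrative): for i8 operands with sign==false, 16 * 16 = 256
/// needs 9 bits, so this returns true, while 15 * 17 = 255 still fits in 8 bits
/// and this returns false.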
722 static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
723 uint32_t W = C1->getBitWidth();
724 APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
733 APInt MulExt = LHSExt * RHSExt;
736 APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
737 APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
738 return MulExt.slt(Min) || MulExt.sgt(Max);
740 return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
744 /// ShrinkDemandedConstant - Check to see if the specified operand of the
745 /// specified instruction is a constant integer. If so, check to see if there
746 /// are any bits set in the constant that are not demanded. If so, shrink the
747 /// constant and return true.
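/// For example (illustrative): if operand OpNo of I is the constant 0x00FF00FF
/// and Demanded is 0x000000FF, the upper set bits are not demanded, so the
/// operand is replaced with 0x000000FF and the function returns true.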
748 static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
750 assert(I && "No instruction?");
751 assert(OpNo < I->getNumOperands() && "Operand index too large");
753 // If the operand is not a constant integer, nothing to do.
754 ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
755 if (!OpC) return false;
757 // If there are no bits set that aren't demanded, nothing to do.
758 Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
759 if ((~Demanded & OpC->getValue()) == 0)
762 // This instruction is producing bits that are not demanded. Shrink the RHS.
763 Demanded &= OpC->getValue();
764 I->setOperand(OpNo, ConstantInt::get(OpC->getType(), Demanded));
768 // ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
769 // set of known zero and one bits, compute the maximum and minimum values that
770 // could have the specified known zero and known one bits, returning them in Min and Max.
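// A worked example (illustrative, 4-bit): KnownZero = 0100, KnownOne = 0001,
// so bits 1 and 3 are unknown. The possible values are 0001, 0011, 1001 and
// 1011, i.e. 1, 3, -7 and -5, giving Min = 1001 (-7) and Max = 0011 (3).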
772 static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
773 const APInt& KnownOne,
774 APInt& Min, APInt& Max) {
775 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
776 KnownZero.getBitWidth() == Min.getBitWidth() &&
777 KnownZero.getBitWidth() == Max.getBitWidth() &&
778 "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
779 APInt UnknownBits = ~(KnownZero|KnownOne);
781 // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
782 // bit if it is unknown.
784 Max = KnownOne|UnknownBits;
786 if (UnknownBits.isNegative()) { // Sign bit is unknown
787 Min.set(Min.getBitWidth()-1);
788 Max.clear(Max.getBitWidth()-1);
792 // ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
793 // a set of known zero and one bits, compute the maximum and minimum values that
794 // could have the specified known zero and known one bits, returning them in Min and Max.
796 static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
797 const APInt &KnownOne,
798 APInt &Min, APInt &Max) {
799 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
800 KnownZero.getBitWidth() == Min.getBitWidth() &&
801 KnownZero.getBitWidth() == Max.getBitWidth() &&
802 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
803 APInt UnknownBits = ~(KnownZero|KnownOne);
805 // The minimum value is when the unknown bits are all zeros.
807 // The maximum value is when the unknown bits are all ones.
808 Max = KnownOne|UnknownBits;
811 /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
812 /// SimplifyDemandedBits knows about. See if the instruction has any
813 /// properties that allow us to simplify its operands.
814 bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
815 unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
816 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
817 APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
819 Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
820 KnownZero, KnownOne, 0);
821 if (V == 0) return false;
822 if (V == &Inst) return true;
823 ReplaceInstUsesWith(Inst, V);
827 /// SimplifyDemandedBits - This form of SimplifyDemandedBits simplifies the
828 /// specified instruction operand if possible, updating it in place. It returns
829 /// true if it made any change and false otherwise.
830 bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
831 APInt &KnownZero, APInt &KnownOne,
833 Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
834 KnownZero, KnownOne, Depth);
835 if (NewVal == 0) return false;
841 /// SimplifyDemandedUseBits - This function attempts to replace V with a simpler
842 /// value based on the demanded bits. When this function is called, it is known
843 /// that only the bits set in DemandedMask of the result of V are ever used
844 /// downstream. Consequently, depending on the mask and V, it may be possible
845 /// to replace V with a constant or one of its operands. In such cases, this
846 /// function does the replacement and returns true. In all other cases, it
847 /// returns false after analyzing the expression and setting KnownOne and known
848 /// to be one in the expression. KnownZero contains all the bits that are known
849 /// to be zero in the expression. These are provided to potentially allow the
850 /// caller (which might recursively be SimplifyDemandedBits itself) to simplify
851 /// the expression. KnownOne and KnownZero always follow the invariant that
852 /// KnownOne & KnownZero == 0. That is, a bit can't be both 1 and 0. Note that
853 /// the bits in KnownOne and KnownZero may only be accurate for those bits set
854 /// in DemandedMask. Note also that the bitwidth of V, DemandedMask, KnownZero
855 /// and KnownOne must all be the same.
857 /// This returns null if it did not change anything and it permits no
858 /// simplification. This returns V itself if it did some simplification of V's
859 /// operands based on the information about what bits are demanded. This returns
860 /// some other non-null value if it found out that V is equal to another value
861 /// in the context where the specified bits are demanded, but not for all users.
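///
/// For example (illustrative): if V is (and %X, 255) and DemandedMask is 0xFF,
/// the 'and' does not change any demanded bit, so %X is returned; if V is
/// (or %X, 255) with the same mask, every demanded bit is known to be one, so
/// the constant 255 is returned.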
862 Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
863 APInt &KnownZero, APInt &KnownOne,
865 assert(V != 0 && "Null pointer of Value???");
866 assert(Depth <= 6 && "Limit Search Depth");
867 uint32_t BitWidth = DemandedMask.getBitWidth();
868 const Type *VTy = V->getType();
869 assert((TD || !isa<PointerType>(VTy)) &&
870 "SimplifyDemandedBits needs to know bit widths!");
871 assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
872 (!VTy->isIntOrIntVector() ||
873 VTy->getScalarSizeInBits() == BitWidth) &&
874 KnownZero.getBitWidth() == BitWidth &&
875 KnownOne.getBitWidth() == BitWidth &&
876 "Value *V, DemandedMask, KnownZero and KnownOne "
877 "must have same BitWidth");
878 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
879 // We know all of the bits for a constant!
880 KnownOne = CI->getValue() & DemandedMask;
881 KnownZero = ~KnownOne & DemandedMask;
884 if (isa<ConstantPointerNull>(V)) {
885 // We know all of the bits for a constant!
887 KnownZero = DemandedMask;
893 if (DemandedMask == 0) { // Not demanding any bits from V.
894 if (isa<UndefValue>(V))
896 return UndefValue::get(VTy);
899 if (Depth == 6) // Limit search depth.
902 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
903 APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;
905 Instruction *I = dyn_cast<Instruction>(V);
907 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
908 return 0; // Only analyze instructions.
911 // If there are multiple uses of this value and we aren't at the root, then
912 // we can't do any simplifications of the operands, because DemandedMask
913 // only reflects the bits demanded by *one* of the users.
914 if (Depth != 0 && !I->hasOneUse()) {
915 // Despite the fact that we can't simplify this instruction in all of its
916 // users' contexts, we can at least compute the known-zero/known-one bits, and
917 // we can do simplifications that apply to *just* the one user if we know that
918 // this instruction has a simpler value in that context.
919 if (I->getOpcode() == Instruction::And) {
920 // If either the LHS or the RHS are Zero, the result is zero.
921 ComputeMaskedBits(I->getOperand(1), DemandedMask,
922 RHSKnownZero, RHSKnownOne, Depth+1);
923 ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
924 LHSKnownZero, LHSKnownOne, Depth+1);
926 // If all of the demanded bits are known 1 on one side, return the other.
927 // These bits cannot contribute to the result of the 'and' in this context.
929 if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
930 (DemandedMask & ~LHSKnownZero))
931 return I->getOperand(0);
932 if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
933 (DemandedMask & ~RHSKnownZero))
934 return I->getOperand(1);
936 // If all of the demanded bits in the inputs are known zeros, return zero.
937 if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
938 return Constant::getNullValue(VTy);
940 } else if (I->getOpcode() == Instruction::Or) {
941 // We can simplify (X|Y) -> X or Y in the user's context if we know that
942 // only bits from X or Y are demanded.
944 // If either the LHS or the RHS are One, the result is One.
945 ComputeMaskedBits(I->getOperand(1), DemandedMask,
946 RHSKnownZero, RHSKnownOne, Depth+1);
947 ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
948 LHSKnownZero, LHSKnownOne, Depth+1);
950 // If all of the demanded bits are known zero on one side, return the
951 // other. These bits cannot contribute to the result of the 'or' in this context.
953 if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
954 (DemandedMask & ~LHSKnownOne))
955 return I->getOperand(0);
956 if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
957 (DemandedMask & ~RHSKnownOne))
958 return I->getOperand(1);
960 // If all of the potentially set bits on one side are known to be set on
961 // the other side, just use the 'other' side.
962 if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
963 (DemandedMask & (~RHSKnownZero)))
964 return I->getOperand(0);
965 if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
966 (DemandedMask & (~LHSKnownZero)))
967 return I->getOperand(1);
970 // Compute the KnownZero/KnownOne bits to simplify things downstream.
971 ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
975 // If this is the root being simplified, allow it to have multiple uses,
976 // just set the DemandedMask to all bits so that we can try to simplify the
977 // operands. This allows visitTruncInst (for example) to simplify the
978 // operand of a trunc without duplicating all the logic below.
979 if (Depth == 0 && !V->hasOneUse())
980 DemandedMask = APInt::getAllOnesValue(BitWidth);
982 switch (I->getOpcode()) {
984 ComputeMaskedBits(I, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
986 case Instruction::And:
987 // If either the LHS or the RHS are Zero, the result is zero.
988 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
989 RHSKnownZero, RHSKnownOne, Depth+1) ||
990 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
991 LHSKnownZero, LHSKnownOne, Depth+1))
993 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
994 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
996 // If all of the demanded bits are known 1 on one side, return the other.
997 // These bits cannot contribute to the result of the 'and'.
998 if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
999 (DemandedMask & ~LHSKnownZero))
1000 return I->getOperand(0);
1001 if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
1002 (DemandedMask & ~RHSKnownZero))
1003 return I->getOperand(1);
1005 // If all of the demanded bits in the inputs are known zeros, return zero.
1006 if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
1007 return Constant::getNullValue(VTy);
1009 // If the RHS is a constant, see if we can simplify it.
1010 if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
1013 // Output known-1 bits are only known if set in both the LHS & RHS.
1014 RHSKnownOne &= LHSKnownOne;
1015 // Output known-0 are known to be clear if zero in either the LHS | RHS.
1016 RHSKnownZero |= LHSKnownZero;
1018 case Instruction::Or:
1019 // If either the LHS or the RHS are One, the result is One.
1020 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1021 RHSKnownZero, RHSKnownOne, Depth+1) ||
1022 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
1023 LHSKnownZero, LHSKnownOne, Depth+1))
1025 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1026 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1028 // If all of the demanded bits are known zero on one side, return the other.
1029 // These bits cannot contribute to the result of the 'or'.
1030 if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
1031 (DemandedMask & ~LHSKnownOne))
1032 return I->getOperand(0);
1033 if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
1034 (DemandedMask & ~RHSKnownOne))
1035 return I->getOperand(1);
1037 // If all of the potentially set bits on one side are known to be set on
1038 // the other side, just use the 'other' side.
1039 if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
1040 (DemandedMask & (~RHSKnownZero)))
1041 return I->getOperand(0);
1042 if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
1043 (DemandedMask & (~LHSKnownZero)))
1044 return I->getOperand(1);
1046 // If the RHS is a constant, see if we can simplify it.
1047 if (ShrinkDemandedConstant(I, 1, DemandedMask))
1050 // Output known-0 bits are only known if clear in both the LHS & RHS.
1051 RHSKnownZero &= LHSKnownZero;
1052 // Output known-1 are known to be set if set in either the LHS | RHS.
1053 RHSKnownOne |= LHSKnownOne;
1055 case Instruction::Xor: {
1056 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1057 RHSKnownZero, RHSKnownOne, Depth+1) ||
1058 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1059 LHSKnownZero, LHSKnownOne, Depth+1))
1061 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1062 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1064 // If all of the demanded bits are known zero on one side, return the other.
1065 // These bits cannot contribute to the result of the 'xor'.
1066 if ((DemandedMask & RHSKnownZero) == DemandedMask)
1067 return I->getOperand(0);
1068 if ((DemandedMask & LHSKnownZero) == DemandedMask)
1069 return I->getOperand(1);
1071 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1072 APInt KnownZeroOut = (RHSKnownZero & LHSKnownZero) |
1073 (RHSKnownOne & LHSKnownOne);
1074 // Output known-1 are known to be set if set in only one of the LHS, RHS.
1075 APInt KnownOneOut = (RHSKnownZero & LHSKnownOne) |
1076 (RHSKnownOne & LHSKnownZero);
1078 // If all of the demanded bits are known to be zero on one side or the
1079 // other, turn this into an *inclusive* or.
1080 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1081 if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
1083 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
1085 return InsertNewInstBefore(Or, *I);
1088 // If all of the demanded bits on one side are known, and all of the set
1089 // bits on that side are also known to be set on the other side, turn this
1090 // into an AND, as we know the bits will be cleared.
1091 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1092 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
1094 if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
1095 Constant *AndC = Constant::getIntegerValue(VTy,
1096 ~RHSKnownOne & DemandedMask);
1098 BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
1099 return InsertNewInstBefore(And, *I);
1103 // If the RHS is a constant, see if we can simplify it.
1104 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
1105 if (ShrinkDemandedConstant(I, 1, DemandedMask))
1108 // If our LHS is an 'and' and if it has one use, and if any of the bits we
1109 // are flipping are known to be set, then the xor is just resetting those
1110 // bits to zero. We can just knock out bits from the 'and' and the 'xor',
1111 // simplifying both of them.
1112 if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
1113 if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
1114 isa<ConstantInt>(I->getOperand(1)) &&
1115 isa<ConstantInt>(LHSInst->getOperand(1)) &&
1116 (LHSKnownOne & RHSKnownOne & DemandedMask) != 0) {
1117 ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
1118 ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
1119 APInt NewMask = ~(LHSKnownOne & RHSKnownOne & DemandedMask);
1122 ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
1123 Instruction *NewAnd =
1124 BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
1125 InsertNewInstBefore(NewAnd, *I);
1128 ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
1129 Instruction *NewXor =
1130 BinaryOperator::CreateXor(NewAnd, XorC, "tmp");
1131 return InsertNewInstBefore(NewXor, *I);
1135 RHSKnownZero = KnownZeroOut;
1136 RHSKnownOne = KnownOneOut;
1139 case Instruction::Select:
1140 if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
1141 RHSKnownZero, RHSKnownOne, Depth+1) ||
1142 SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1143 LHSKnownZero, LHSKnownOne, Depth+1))
1145 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1146 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1148 // If the operands are constants, see if we can simplify them.
1149 if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
1150 ShrinkDemandedConstant(I, 2, DemandedMask))
1153 // Only known if known in both the LHS and RHS.
1154 RHSKnownOne &= LHSKnownOne;
1155 RHSKnownZero &= LHSKnownZero;
1157 case Instruction::Trunc: {
1158 unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
1159 DemandedMask.zext(truncBf);
1160 RHSKnownZero.zext(truncBf);
1161 RHSKnownOne.zext(truncBf);
1162 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1163 RHSKnownZero, RHSKnownOne, Depth+1))
1165 DemandedMask.trunc(BitWidth);
1166 RHSKnownZero.trunc(BitWidth);
1167 RHSKnownOne.trunc(BitWidth);
1168 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1171 case Instruction::BitCast:
1172 if (!I->getOperand(0)->getType()->isIntOrIntVector())
1173 return false; // vector->int or fp->int?
1175 if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
1176 if (const VectorType *SrcVTy =
1177 dyn_cast<VectorType>(I->getOperand(0)->getType())) {
1178 if (DstVTy->getNumElements() != SrcVTy->getNumElements())
1179 // Don't touch a bitcast between vectors of different element counts.
1182 // Don't touch a scalar-to-vector bitcast.
1184 } else if (isa<VectorType>(I->getOperand(0)->getType()))
1185 // Don't touch a vector-to-scalar bitcast.
1188 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1189 RHSKnownZero, RHSKnownOne, Depth+1))
1191 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1193 case Instruction::ZExt: {
1194 // Compute the bits in the result that are not present in the input.
1195 unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
1197 DemandedMask.trunc(SrcBitWidth);
1198 RHSKnownZero.trunc(SrcBitWidth);
1199 RHSKnownOne.trunc(SrcBitWidth);
1200 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1201 RHSKnownZero, RHSKnownOne, Depth+1))
1203 DemandedMask.zext(BitWidth);
1204 RHSKnownZero.zext(BitWidth);
1205 RHSKnownOne.zext(BitWidth);
1206 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1207 // The top bits are known to be zero.
1208 RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1211 case Instruction::SExt: {
1212 // Compute the bits in the result that are not present in the input.
1213 unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
1215 APInt InputDemandedBits = DemandedMask &
1216 APInt::getLowBitsSet(BitWidth, SrcBitWidth);
1218 APInt NewBits(APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth));
1219 // If any of the sign extended bits are demanded, we know that the sign bit is demanded.
1221 if ((NewBits & DemandedMask) != 0)
1222 InputDemandedBits.set(SrcBitWidth-1);
1224 InputDemandedBits.trunc(SrcBitWidth);
1225 RHSKnownZero.trunc(SrcBitWidth);
1226 RHSKnownOne.trunc(SrcBitWidth);
1227 if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
1228 RHSKnownZero, RHSKnownOne, Depth+1))
1230 InputDemandedBits.zext(BitWidth);
1231 RHSKnownZero.zext(BitWidth);
1232 RHSKnownOne.zext(BitWidth);
1233 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1235 // If the sign bit of the input is known set or clear, then we know the
1236 // top bits of the result.
1238 // If the input sign bit is known zero, or if the NewBits are not demanded,
1239 // convert this into a zero extension.
1240 if (RHSKnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
1241 // Convert to ZExt cast
1242 CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
1243 return InsertNewInstBefore(NewCast, *I);
1244 } else if (RHSKnownOne[SrcBitWidth-1]) { // Input sign bit known set
1245 RHSKnownOne |= NewBits;
1249 case Instruction::Add: {
1250 // Figure out what the input bits are. If the top bits of the result
1251 // are not demanded, then the add doesn't demand them from its inputs either.
1253 unsigned NLZ = DemandedMask.countLeadingZeros();
1255 // If there is a constant on the RHS, there are a variety of transformations that can be done.
1257 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1258 // If the constant is zero, this should have been simplified elsewhere. Some
1259 // of the transforms here won't work if the RHS is zero.
1263 // If the top bit of the output is demanded, demand everything from the
1264 // input. Otherwise, we demand all the input bits except NLZ top bits.
1265 APInt InDemandedBits(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));
1267 // Find information about known zero/one bits in the input.
1268 if (SimplifyDemandedBits(I->getOperandUse(0), InDemandedBits,
1269 LHSKnownZero, LHSKnownOne, Depth+1))
1272 // If the RHS of the add has bits set that can't affect the input, reduce the constant.
1274 if (ShrinkDemandedConstant(I, 1, InDemandedBits))
1277 // Avoid excess work.
1278 if (LHSKnownZero == 0 && LHSKnownOne == 0)
1281 // Turn it into OR if input bits are zero.
1282 if ((LHSKnownZero & RHS->getValue()) == RHS->getValue()) {
1284 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
1286 return InsertNewInstBefore(Or, *I);
1289 // We can say something about the output known-zero and known-one bits,
1290 // depending on potential carries from the input constant and the
1291 // unknowns. For example if the LHS is known to have at most the 0x0F0F0
1292 // bits set and the RHS constant is 0x01001, then we know we have a known
1293 // one mask of 0x00001 and a known zero mask of 0xE0F0E.
1295 // To compute this, we first compute the potential carry bits. These are
1296 // the bits which may be modified. I'm not aware of a better way to do this scan.
1298 const APInt &RHSVal = RHS->getValue();
1299 APInt CarryBits((~LHSKnownZero + RHSVal) ^ (~LHSKnownZero ^ RHSVal));
1301 // Now that we know which bits have carries, compute the known-1/0 sets.
1303 // Bits are known one if they are known zero in one operand and one in the
1304 // other, and there is no input carry.
1305 RHSKnownOne = ((LHSKnownZero & RHSVal) |
1306 (LHSKnownOne & ~RHSVal)) & ~CarryBits;
1308 // Bits are known zero if they are known zero in both operands and there
1309 // is no input carry.
1310 RHSKnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
1312 // If the high-bits of this ADD are not demanded, then it does not demand
1313 // the high bits of its LHS or RHS.
1314 if (DemandedMask[BitWidth-1] == 0) {
1315 // Right fill the mask of bits for this ADD to demand the most
1316 // significant bit and all those below it.
1317 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1318 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
1319 LHSKnownZero, LHSKnownOne, Depth+1) ||
1320 SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
1321 LHSKnownZero, LHSKnownOne, Depth+1))
1327 case Instruction::Sub:
1328 // If the high-bits of this SUB are not demanded, then it does not demand
1329 // the high bits of its LHS or RHS.
1330 if (DemandedMask[BitWidth-1] == 0) {
1331 // Right fill the mask of bits for this SUB to demand the most
1332 // significant bit and all those below it.
1333 uint32_t NLZ = DemandedMask.countLeadingZeros();
1334 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1335 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
1336 LHSKnownZero, LHSKnownOne, Depth+1) ||
1337 SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
1338 LHSKnownZero, LHSKnownOne, Depth+1))
1341 // Otherwise just hand the sub off to ComputeMaskedBits to fill in
1342 // the known zeros and ones.
1343 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
1345 case Instruction::Shl:
1346 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1347 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1348 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
1349 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1350 RHSKnownZero, RHSKnownOne, Depth+1))
1352 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1353 RHSKnownZero <<= ShiftAmt;
1354 RHSKnownOne <<= ShiftAmt;
1355 // low bits known zero.
1357 RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
1360 case Instruction::LShr:
1361 // For a logical shift right
1362 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1363 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1365 // Unsigned shift right.
1366 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
1367 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1368 RHSKnownZero, RHSKnownOne, Depth+1))
1370 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1371 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1372 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1374 // Compute the new bits that are at the top now.
1375 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1376 RHSKnownZero |= HighBits; // high bits known zero.
1380 case Instruction::AShr:
1381 // If this is an arithmetic shift right and only the low-bit is set, we can
1382 // always convert this into a logical shr, even if the shift amount is
1383 // variable. The low bit of the shift cannot be an input sign bit unless
1384 // the shift amount is >= the size of the datatype, which is undefined.
1385 if (DemandedMask == 1) {
1386 // Perform the logical shift right.
1387 Instruction *NewVal = BinaryOperator::CreateLShr(
1388 I->getOperand(0), I->getOperand(1), I->getName());
1389 return InsertNewInstBefore(NewVal, *I);
1392 // If the sign bit is the only bit demanded by this ashr, then there is no
1393 // need to do it, the shift doesn't change the high bit.
1394 if (DemandedMask.isSignBit())
1395 return I->getOperand(0);
1397 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1398 uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);
1400 // Signed shift right.
1401 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
1402 // If any of the "high bits" are demanded, we should set the sign bit as demanded.
1404 if (DemandedMask.countLeadingZeros() <= ShiftAmt)
1405 DemandedMaskIn.set(BitWidth-1);
1406 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1407 RHSKnownZero, RHSKnownOne, Depth+1))
1409 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1410 // Compute the new bits that are at the top now.
1411 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1412 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1413 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1415 // Handle the sign bits.
1416 APInt SignBit(APInt::getSignBit(BitWidth));
1417 // Adjust to where it is now in the mask.
1418 SignBit = APIntOps::lshr(SignBit, ShiftAmt);
1420 // If the input sign bit is known to be zero, or if none of the top bits
1421 // are demanded, turn this into an unsigned shift right.
1422 if (BitWidth <= ShiftAmt || RHSKnownZero[BitWidth-ShiftAmt-1] ||
1423 (HighBits & ~DemandedMask) == HighBits) {
1424 // Perform the logical shift right.
1425 Instruction *NewVal = BinaryOperator::CreateLShr(
1426 I->getOperand(0), SA, I->getName());
1427 return InsertNewInstBefore(NewVal, *I);
1428 } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
1429 RHSKnownOne |= HighBits;
1433 case Instruction::SRem:
1434 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1435 APInt RA = Rem->getValue().abs();
1436 if (RA.isPowerOf2()) {
1437 if (DemandedMask.ult(RA)) // srem won't affect demanded bits
1438 return I->getOperand(0);
1440 APInt LowBits = RA - 1;
1441 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
1442 if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
1443 LHSKnownZero, LHSKnownOne, Depth+1))
1446 if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
1447 LHSKnownZero |= ~LowBits;
1449 KnownZero |= LHSKnownZero & DemandedMask;
1451 assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
1455 case Instruction::URem: {
1456 APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
1457 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1458 if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
1459 KnownZero2, KnownOne2, Depth+1) ||
1460 SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
1461 KnownZero2, KnownOne2, Depth+1))
1464 unsigned Leaders = KnownZero2.countLeadingOnes();
1465 Leaders = std::max(Leaders,
1466 KnownZero2.countLeadingOnes());
1467 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
1470 case Instruction::Call:
1471 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1472 switch (II->getIntrinsicID()) {
1474 case Intrinsic::bswap: {
1475 // If the only bits demanded come from one byte of the bswap result,
1476 // just shift the input byte into position to eliminate the bswap.
1477 unsigned NLZ = DemandedMask.countLeadingZeros();
1478 unsigned NTZ = DemandedMask.countTrailingZeros();
1480 // Round NTZ down to the next byte. If we have 11 trailing zeros, then
1481 // we need all the bits down to bit 8. Likewise, round NLZ. If we
1482 // have 14 leading zeros, round to 8.
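// Illustrative example: for an i32 bswap where only result bits 8-15 are
// demanded, NLZ = 16 and NTZ = 8, so exactly one byte is needed; it comes
// from input bits 16-23 and a lshr by 8 replaces the bswap.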
1485 // If we need exactly one byte, we can do this transformation.
1486 if (BitWidth-NLZ-NTZ == 8) {
1487 unsigned ResultBit = NTZ;
1488 unsigned InputBit = BitWidth-NTZ-8;
1490 // Replace this with either a left or right shift to get the byte into the right place.
1492 Instruction *NewVal;
1493 if (InputBit > ResultBit)
1494 NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
1495 ConstantInt::get(I->getType(), InputBit-ResultBit));
1497 NewVal = BinaryOperator::CreateShl(I->getOperand(1),
1498 ConstantInt::get(I->getType(), ResultBit-InputBit));
1499 NewVal->takeName(I);
1500 return InsertNewInstBefore(NewVal, *I);
1503 // TODO: Could compute known zero/one bits based on the input.
1508 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
1512 // If the client is only demanding bits that we know, return the known constant.
1514 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask)
1515 return Constant::getIntegerValue(VTy, RHSKnownOne);
1520 /// SimplifyDemandedVectorElts - The specified value produces a vector with
1521 /// any number of elements. DemandedElts contains the set of elements that are
1522 /// actually used by the caller. This method analyzes which elements of the
1523 /// operand are undef and returns that information in UndefElts.
1525 /// If the information about demanded elements can be used to simplify the
1526 /// operation, the operation is simplified and the resulting value is
1527 /// returned. This returns null if no change was made.
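/// For example (illustrative): if the caller only demands element 0 of a
/// <4 x float> value produced by an insertelement that writes element 2,
/// the insertelement can be skipped and its vector operand used directly.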
1528 Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
1531 unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
1532 APInt EltMask(APInt::getAllOnesValue(VWidth));
1533 assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
1535 if (isa<UndefValue>(V)) {
1536 // If the entire vector is undefined, just return this info.
1537 UndefElts = EltMask;
1539 } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
1540 UndefElts = EltMask;
1541 return UndefValue::get(V->getType());
1545 if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
1546 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1547 Constant *Undef = UndefValue::get(EltTy);
1549 std::vector<Constant*> Elts;
1550 for (unsigned i = 0; i != VWidth; ++i)
1551 if (!DemandedElts[i]) { // If not demanded, set to undef.
1552 Elts.push_back(Undef);
1554 } else if (isa<UndefValue>(CP->getOperand(i))) { // Already undef.
1555 Elts.push_back(Undef);
1557 } else { // Otherwise, defined.
1558 Elts.push_back(CP->getOperand(i));
1561 // If we changed the constant, return it.
1562 Constant *NewCP = ConstantVector::get(Elts);
1563 return NewCP != CP ? NewCP : 0;
1564 } else if (isa<ConstantAggregateZero>(V)) {
1565 // Simplify the CAZ to a ConstantVector where the non-demanded elements are set to undef.
1568 // Check if this is identity. If so, return 0 since we are not simplifying anything.
1570 if (DemandedElts == ((1ULL << VWidth) -1))
1573 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1574 Constant *Zero = Constant::getNullValue(EltTy);
1575 Constant *Undef = UndefValue::get(EltTy);
1576 std::vector<Constant*> Elts;
1577 for (unsigned i = 0; i != VWidth; ++i) {
1578 Constant *Elt = DemandedElts[i] ? Zero : Undef;
1579 Elts.push_back(Elt);
1581 UndefElts = DemandedElts ^ EltMask;
1582 return ConstantVector::get(Elts);
1585 // Limit search depth.
1589 // If multiple users are using the root value, proceed with
1590 // simplification conservatively assuming that all elements are needed.
1592 if (!V->hasOneUse()) {
1593 // Quit if we find multiple users of a non-root value though.
1594 // They'll be handled when it's their turn to be visited by
1595 // the main instcombine process.
1597 // TODO: Just compute the UndefElts information recursively.
1600 // Conservatively assume that all elements are needed.
1601 DemandedElts = EltMask;
1604 Instruction *I = dyn_cast<Instruction>(V);
1605 if (!I) return 0; // Only analyze instructions.
1607 bool MadeChange = false;
1608 APInt UndefElts2(VWidth, 0);
1610 switch (I->getOpcode()) {
1613 case Instruction::InsertElement: {
1614 // If this is a variable index, we don't know which element it overwrites,
1615 // so demand exactly the same input as we produce.
1616 ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1618 // Note that we can't propagate undef elt info, because we don't know
1619 // which elt is getting updated.
1620 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1621 UndefElts2, Depth+1);
1622 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1626 // If this is inserting an element that isn't demanded, remove this insertelement.
1628 unsigned IdxNo = Idx->getZExtValue();
1629 if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1631 return I->getOperand(0);
1634 // Otherwise, the element inserted overwrites whatever was there, so the
1635 // input demanded set is simpler than the output set.
1636 APInt DemandedElts2 = DemandedElts;
1637 DemandedElts2.clear(IdxNo);
1638 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
1639 UndefElts, Depth+1);
1640 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1642 // The inserted element is defined.
1643 UndefElts.clear(IdxNo);
1646 case Instruction::ShuffleVector: {
1647 ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
1648 uint64_t LHSVWidth =
1649 cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
1650 APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
1651 for (unsigned i = 0; i < VWidth; i++) {
1652 if (DemandedElts[i]) {
1653 unsigned MaskVal = Shuffle->getMaskValue(i);
1654 if (MaskVal != -1u) {
1655 assert(MaskVal < LHSVWidth * 2 &&
1656 "shufflevector mask index out of range!");
1657 if (MaskVal < LHSVWidth)
1658 LeftDemanded.set(MaskVal);
1660 RightDemanded.set(MaskVal - LHSVWidth);
1665 APInt UndefElts4(LHSVWidth, 0);
1666 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
1667 UndefElts4, Depth+1);
1668 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1670 APInt UndefElts3(LHSVWidth, 0);
1671 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
1672 UndefElts3, Depth+1);
1673 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1675 bool NewUndefElts = false;
1676 for (unsigned i = 0; i < VWidth; i++) {
1677 unsigned MaskVal = Shuffle->getMaskValue(i);
1678 if (MaskVal == -1u) {
1680 } else if (MaskVal < LHSVWidth) {
1681 if (UndefElts4[MaskVal]) {
1682 NewUndefElts = true;
1686 if (UndefElts3[MaskVal - LHSVWidth]) {
1687 NewUndefElts = true;
1694 // Add additional discovered undefs.
1695 std::vector<Constant*> Elts;
1696 for (unsigned i = 0; i < VWidth; ++i) {
1698 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
1700 Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
1701 Shuffle->getMaskValue(i)));
1703 I->setOperand(2, ConstantVector::get(Elts));
1708 case Instruction::BitCast: {
1709 // Vector->vector casts only.
1710 const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1712 unsigned InVWidth = VTy->getNumElements();
1713 APInt InputDemandedElts(InVWidth, 0);
1716 if (VWidth == InVWidth) {
1717 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1718 // elements as are demanded of us.
1720 InputDemandedElts = DemandedElts;
1721 } else if (VWidth > InVWidth) {
1725 // If there are more elements in the result than there are in the source,
1726 // then an input element is live if any of the corresponding output
1727 // elements are live.
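// Illustrative example: for a bitcast from <2 x i64> to <4 x i32>
// (Ratio == 2), demanding result element 2 or 3 marks source element 1 as
// demanded.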
1728 Ratio = VWidth/InVWidth;
1729 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1730 if (DemandedElts[OutIdx])
1731 InputDemandedElts.set(OutIdx/Ratio);
1737 // If there are more elements in the source than there are in the result,
1738 // then an input element is live if the corresponding output element is live.
1740 Ratio = InVWidth/VWidth;
1741 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1742 if (DemandedElts[InIdx/Ratio])
1743 InputDemandedElts.set(InIdx);
1746 // div/rem demand all inputs, because they don't want divide by zero.
1747 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
1748 UndefElts2, Depth+1);
1750 I->setOperand(0, TmpV);
1754 UndefElts = UndefElts2;
1755 if (VWidth > InVWidth) {
1756 llvm_unreachable("Unimp");
1757 // If there are more elements in the result than there are in the source,
1758 // then an output element is undef if the corresponding input element is undef.
1760 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1761 if (UndefElts2[OutIdx/Ratio])
1762 UndefElts.set(OutIdx);
1763 } else if (VWidth < InVWidth) {
1764 llvm_unreachable("Unimp");
1765 // If there are more elements in the source than there are in the result,
1766 // then a result element is undef if all of the corresponding input
1767 // elements are undef.
1768 UndefElts = ~0ULL >> (64-VWidth); // Start out all undef.
1769 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1770 if (!UndefElts2[InIdx]) // Not undef?
1771 UndefElts.clear(InIdx/Ratio); // Clear undef bit.
1775 case Instruction::And:
1776 case Instruction::Or:
1777 case Instruction::Xor:
1778 case Instruction::Add:
1779 case Instruction::Sub:
1780 case Instruction::Mul:
1781 // (div/rem are not handled here: they demand all of their inputs, because they must not introduce a divide by zero.)
1782 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1783 UndefElts, Depth+1);
1784 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1785 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
1786 UndefElts2, Depth+1);
1787 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1789 // Output elements are undefined if both are undefined. Consider things
1790 // like undef&0. The result is known zero, not undef.
1791 UndefElts &= UndefElts2;
1794 case Instruction::Call: {
1795 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1797 switch (II->getIntrinsicID()) {
1800 // Binary vector operations that work column-wise. A dest element is a
1801 // function of the corresponding input elements from the two inputs.
1802 case Intrinsic::x86_sse_sub_ss:
1803 case Intrinsic::x86_sse_mul_ss:
1804 case Intrinsic::x86_sse_min_ss:
1805 case Intrinsic::x86_sse_max_ss:
1806 case Intrinsic::x86_sse2_sub_sd:
1807 case Intrinsic::x86_sse2_mul_sd:
1808 case Intrinsic::x86_sse2_min_sd:
1809 case Intrinsic::x86_sse2_max_sd:
1810 TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
1811 UndefElts, Depth+1);
1812 if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
1813 TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
1814 UndefElts2, Depth+1);
1815 if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
1817 // If only the low elt is demanded and this is a scalarizable intrinsic,
1818 // scalarize it now.
1819 if (DemandedElts == 1) {
1820 switch (II->getIntrinsicID()) {
1822 case Intrinsic::x86_sse_sub_ss:
1823 case Intrinsic::x86_sse_mul_ss:
1824 case Intrinsic::x86_sse2_sub_sd:
1825 case Intrinsic::x86_sse2_mul_sd:
1826 // TODO: Lower MIN/MAX/ABS/etc
1827 Value *LHS = II->getOperand(1);
1828 Value *RHS = II->getOperand(2);
1829 // Extract the elements as scalars.
1830 LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
1831 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
1832 RHS = InsertNewInstBefore(ExtractElementInst::Create(RHS,
1833 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
1835 switch (II->getIntrinsicID()) {
1836 default: llvm_unreachable("Case stmts out of sync!");
1837 case Intrinsic::x86_sse_sub_ss:
1838 case Intrinsic::x86_sse2_sub_sd:
1839 TmpV = InsertNewInstBefore(BinaryOperator::CreateFSub(LHS, RHS,
1840 II->getName()), *II);
1842 case Intrinsic::x86_sse_mul_ss:
1843 case Intrinsic::x86_sse2_mul_sd:
1844 TmpV = InsertNewInstBefore(BinaryOperator::CreateFMul(LHS, RHS,
1845 II->getName()), *II);
1850 InsertElementInst::Create(
1851 UndefValue::get(II->getType()), TmpV,
1852 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), II->getName());
1853 InsertNewInstBefore(New, *II);
1858 // Output elements are undefined if both are undefined. Consider things
1859 // like undef&0. The result is known zero, not undef.
1860 UndefElts &= UndefElts2;
1866 return MadeChange ? I : 0;
1870 /// AssociativeOpt - Perform an optimization on an associative operator. This
1871 /// function is designed to check a chain of associative operators for a
1872 /// potential to apply a certain optimization. Since the optimization may be
1873 /// applicable if the expression was reassociated, this checks the chain, then
1874 /// reassociates the expression as necessary to expose the optimization
1875 /// opportunity. This makes use of a special Functor, which must define
1876 /// 'shouldApply' and 'apply' methods.
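/// For example (illustrative), using the AddRHS functor defined below on the
/// root of ((A + B) + X) + X: the chain is reassociated so that the X + X
/// pair is exposed and rewritten as X << 1, yielding (A + B) + (X << 1).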
1878 template<typename Functor>
1879 static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
1880 unsigned Opcode = Root.getOpcode();
1881 Value *LHS = Root.getOperand(0);
1883 // Quick check, see if the immediate LHS matches...
1884 if (F.shouldApply(LHS))
1885 return F.apply(Root);
1887 // Otherwise, if the LHS is not of the same opcode as the root, return.
1888 Instruction *LHSI = dyn_cast<Instruction>(LHS);
1889 while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
1890 // Should we apply this transform to the RHS?
1891 bool ShouldApply = F.shouldApply(LHSI->getOperand(1));
1893 // If not to the RHS, check to see if we should apply to the LHS...
1894 if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
1895 cast<BinaryOperator>(LHSI)->swapOperands(); // Make the LHS the RHS
1899 // If the functor wants to apply the optimization to the RHS of LHSI,
1900 // reassociate the expression from ((? op A) op B) to (? op (A op B))
1902 // Now all of the instructions are in the current basic block, go ahead
1903 // and perform the reassociation.
1904 Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));
1906 // First move the selected RHS to the LHS of the root...
1907 Root.setOperand(0, LHSI->getOperand(1));
1909 // Make what used to be the LHS of the root be the user of the root...
1910 Value *ExtraOperand = TmpLHSI->getOperand(1);
1911 if (&Root == TmpLHSI) {
1912 Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType()));
1915 Root.replaceAllUsesWith(TmpLHSI); // Users now use TmpLHSI
1916 TmpLHSI->setOperand(1, &Root); // TmpLHSI now uses the root
1917 BasicBlock::iterator ARI = &Root; ++ARI;
1918 TmpLHSI->moveBefore(ARI); // Move TmpLHSI to after Root
1921 // Now propagate the ExtraOperand down the chain of instructions until we
1923 while (TmpLHSI != LHSI) {
1924 Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
1925 // Move the instruction to immediately before the chain we are
1926 // constructing to avoid breaking dominance properties.
1927 NextLHSI->moveBefore(ARI);
1930 Value *NextOp = NextLHSI->getOperand(1);
1931 NextLHSI->setOperand(1, ExtraOperand);
1933 ExtraOperand = NextOp;
1936 // Now that the instructions are reassociated, have the functor perform
1937 // the transformation...
1938 return F.apply(Root);
1941 LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
1948 // AddRHS - Implements: X + X --> X << 1
1951 explicit AddRHS(Value *rhs) : RHS(rhs) {}
1952 bool shouldApply(Value *LHS) const { return LHS == RHS; }
1953 Instruction *apply(BinaryOperator &Add) const {
1954 return BinaryOperator::CreateShl(Add.getOperand(0),
1955 ConstantInt::get(Add.getType(), 1));
1959 // AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
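// Illustrative example: (A & 0x0F) + (B & 0xF0) --> (A & 0x0F) | (B & 0xF0);
// since the two masks share no bits, the addition can never carry and is
// equivalent to the bitwise or.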
1961 struct AddMaskingAnd {
1963 explicit AddMaskingAnd(Constant *c) : C2(c) {}
1964 bool shouldApply(Value *LHS) const {
1966 return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
1967 ConstantExpr::getAnd(C1, C2)->isNullValue();
1969 Instruction *apply(BinaryOperator &Add) const {
1970 return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1));
1976 static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
1978 if (CastInst *CI = dyn_cast<CastInst>(&I))
1979 return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
1981 // Figure out if the constant is the left or the right argument.
1982 bool ConstIsRHS = isa<Constant>(I.getOperand(1));
1983 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
1985 if (Constant *SOC = dyn_cast<Constant>(SO)) {
1987 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
1988 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
1991 Value *Op0 = SO, *Op1 = ConstOperand;
1993 std::swap(Op0, Op1);
1995 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
1996 return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
1997 SO->getName()+".op");
1998 if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
1999 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
2000 SO->getName()+".cmp");
2001 if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
2002 return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
2003 SO->getName()+".cmp");
2004 llvm_unreachable("Unknown binary instruction type!");
2007 // FoldOpIntoSelect - Given an instruction with a select as one operand and a
2008 // constant as the other operand, try to fold the binary operator into the
2009 // select arguments. This also works for Cast instructions, which obviously do
2010 // not have a second operand.
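// For example (illustrative): add (select %c, i32 4, i32 8), 1 becomes
// select %c, i32 5, i32 9, since the add folds into both constant arms.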
2011 static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
2013 // Don't modify shared select instructions
2014 if (!SI->hasOneUse()) return 0;
2015 Value *TV = SI->getOperand(1);
2016 Value *FV = SI->getOperand(2);
2018 if (isa<Constant>(TV) || isa<Constant>(FV)) {
2019 // Bool selects with constant operands are turned into logical ops elsewhere; don't fold into them here.
2020 if (SI->getType() == Type::getInt1Ty(*IC->getContext())) return 0;
2022 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
2023 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);
2025 return SelectInst::Create(SI->getCondition(), SelectTrueVal,
2032 /// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
2033 /// has a PHI node as operand #0, see if we can fold the instruction into the
2034 /// PHI (which is only possible if all operands to the PHI are constants).
2036 /// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
2037 /// that would normally be unprofitable because they strongly encourage jump threading.
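/// For example (illustrative): with %p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ],
/// the instruction add i32 %p, 10 can be folded into the phi, producing
/// %p2 = phi i32 [ 11, %bb0 ], [ 12, %bb1 ].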
2039 Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
2040 bool AllowAggressive) {
2041 AllowAggressive = false;
2042 PHINode *PN = cast<PHINode>(I.getOperand(0));
2043 unsigned NumPHIValues = PN->getNumIncomingValues();
2044 if (NumPHIValues == 0 ||
2045 // We normally only transform phis with a single use, unless we're trying
2046 // hard to make jump threading happen.
2047 (!PN->hasOneUse() && !AllowAggressive))
2051 // Check to see if all of the operands of the PHI are simple constants
2052 // (constantint/constantfp/undef). If there is one non-constant value,
2053 // remember the BB it is in. If there is more than one or if *it* is a PHI,
2054 // bail out. We don't do arbitrary constant expressions here because moving
2055 // their computation can be expensive without a cost model.
2056 BasicBlock *NonConstBB = 0;
2057 for (unsigned i = 0; i != NumPHIValues; ++i)
2058 if (!isa<Constant>(PN->getIncomingValue(i)) ||
2059 isa<ConstantExpr>(PN->getIncomingValue(i))) {
2060 if (NonConstBB) return 0; // More than one non-const value.
2061 if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
2062 NonConstBB = PN->getIncomingBlock(i);
2064 // If the incoming non-constant value is in I's block, we have an infinite loop.
2066 if (NonConstBB == I.getParent())
2070 // If there is exactly one non-constant value, we can insert a copy of the
2071 // operation in that block. However, if this is a critical edge, we would be
2072 // inserting the computation on some other paths (e.g. inside a loop). Only
2073 // do this if the pred block is unconditionally branching into the phi block.
2074 if (NonConstBB != 0 && !AllowAggressive) {
2075 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
2076 if (!BI || !BI->isUnconditional()) return 0;
2079 // Okay, we can do the transformation: create the new PHI node.
2080 PHINode *NewPN = PHINode::Create(I.getType(), "");
2081 NewPN->reserveOperandSpace(PN->getNumOperands()/2);
2082 InsertNewInstBefore(NewPN, *PN);
2083 NewPN->takeName(PN);
2085 // Next, add all of the operands to the PHI.
2086 if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
2087 // We only currently try to fold the condition of a select when it is a phi,
2088 // not the true/false values.
2089 Value *TrueV = SI->getTrueValue();
2090 Value *FalseV = SI->getFalseValue();
2091 BasicBlock *PhiTransBB = PN->getParent();
2092 for (unsigned i = 0; i != NumPHIValues; ++i) {
2093 BasicBlock *ThisBB = PN->getIncomingBlock(i);
2094 Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
2095 Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
2097 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2098 InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
2100 assert(PN->getIncomingBlock(i) == NonConstBB);
2101 InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
2103 "phitmp", NonConstBB->getTerminator());
2104 Worklist.Add(cast<Instruction>(InV));
2106 NewPN->addIncoming(InV, ThisBB);
2108 } else if (I.getNumOperands() == 2) {
2109 Constant *C = cast<Constant>(I.getOperand(1));
2110 for (unsigned i = 0; i != NumPHIValues; ++i) {
2112 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2113 if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2114 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
2116 InV = ConstantExpr::get(I.getOpcode(), InC, C);
2118 assert(PN->getIncomingBlock(i) == NonConstBB);
2119 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
2120 InV = BinaryOperator::Create(BO->getOpcode(),
2121 PN->getIncomingValue(i), C, "phitmp",
2122 NonConstBB->getTerminator());
2123 else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2124 InV = CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
2126 PN->getIncomingValue(i), C, "phitmp",
2127 NonConstBB->getTerminator());
2129 llvm_unreachable("Unknown binop!");
2131 Worklist.Add(cast<Instruction>(InV));
2133 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2136 CastInst *CI = cast<CastInst>(&I);
2137 const Type *RetTy = CI->getType();
2138 for (unsigned i = 0; i != NumPHIValues; ++i) {
2140 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2141 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
2143 assert(PN->getIncomingBlock(i) == NonConstBB);
2144 InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
2145 I.getType(), "phitmp",
2146 NonConstBB->getTerminator());
2147 Worklist.Add(cast<Instruction>(InV));
2149 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2152 return ReplaceInstUsesWith(I, NewPN);
2156 /// WillNotOverflowSignedAdd - Return true if we can prove that:
2157 /// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
2158 /// This basically requires proving that the add in the original type would not
2159 /// overflow to change the sign bit or have a carry out.
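/// For example (illustrative): two i8 values each known to have at least two
/// sign bits lie in [-64, 63], so their sum lies in [-128, 126] and still
/// fits in i8; sign-extending before or after the add gives the same result.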
2160 bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
2161 // There are different heuristics we can use for this. Here are some simple ones.
2164 // Add has the property that adding any two 2's complement numbers can only
2165 // have one carry bit which can change a sign. As such, if LHS and RHS each
2166 // have at least two sign bits, we know that the addition of the two values will
2167 // sign extend fine.
2168 if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
2172 // If one of the operands only has one non-zero bit, and if the other operand
2173 // has a known-zero bit in a more significant place than it (not including the
2174 // sign bit) the ripple may go up to and fill the zero, but won't change the
2175 // sign. For example, (X & ~4) + 1.
2183 Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
2184 bool Changed = SimplifyCommutative(I);
2185 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2187 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2188 // X + undef -> undef
2189 if (isa<UndefValue>(RHS))
2190 return ReplaceInstUsesWith(I, RHS);
2193 if (RHSC->isNullValue())
2194 return ReplaceInstUsesWith(I, LHS);
2196 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
2197 // X + (signbit) --> X ^ signbit
2198 const APInt& Val = CI->getValue();
2199 uint32_t BitWidth = Val.getBitWidth();
2200 if (Val == APInt::getSignBit(BitWidth))
2201 return BinaryOperator::CreateXor(LHS, RHS);
2203 // See if SimplifyDemandedBits can simplify this. This handles stuff like
2204 // (X & 254)+1 -> (X&254)|1
2205 if (SimplifyDemandedInstructionBits(I))
2208 // zext(bool) + C -> bool ? C + 1 : C
2209 if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
2210 if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
2211 return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
2214 if (isa<PHINode>(LHS))
2215 if (Instruction *NV = FoldOpIntoPhi(I))
2218 ConstantInt *XorRHS = 0;
2220 if (isa<ConstantInt>(RHSC) &&
2221 match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
2222 uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
2223 const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
2225 uint32_t Size = TySizeBits / 2;
2226 APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
2227 APInt CFF80Val(-C0080Val);
2229 if (TySizeBits > Size) {
2230 // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
2231 // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
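// Illustrative example (i32): ((X & 0xFF) ^ 0x80) + 0xFFFFFF80 sign-extends
// the low byte of X from 8 to 32 bits, so it is rewritten below as a trunc
// to i8 followed by a sext.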
2232 if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
2233 (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
2234 // This is a sign extend if the top bits are known zero.
2235 if (!MaskedValueIsZero(XorLHS,
2236 APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
2237 Size = 0; // Not a sign ext, but can't be any others either.
2242 C0080Val = APIntOps::lshr(C0080Val, Size);
2243 CFF80Val = APIntOps::ashr(CFF80Val, Size);
2244 } while (Size >= 1);
2246 // FIXME: This shouldn't be necessary. When the backends can handle types
2247 // with funny bit widths then this switch statement should be removed. It
2248 // is just here to get the size of the "middle" type back up to something
2249 // that the back ends can handle.
2250 const Type *MiddleType = 0;
2253 case 32: MiddleType = Type::getInt32Ty(*Context); break;
2254 case 16: MiddleType = Type::getInt16Ty(*Context); break;
2255 case 8: MiddleType = Type::getInt8Ty(*Context); break;
2258 Value *NewTrunc = Builder->CreateTrunc(XorLHS, MiddleType, "sext");
2259 return new SExtInst(NewTrunc, I.getType(), I.getName());
2264 if (I.getType() == Type::getInt1Ty(*Context))
2265 return BinaryOperator::CreateXor(LHS, RHS);
2268 if (I.getType()->isInteger()) {
2269 if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS)))
2272 if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
2273 if (RHSI->getOpcode() == Instruction::Sub)
2274 if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B
2275 return ReplaceInstUsesWith(I, RHSI->getOperand(0));
2277 if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
2278 if (LHSI->getOpcode() == Instruction::Sub)
2279 if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B
2280 return ReplaceInstUsesWith(I, LHSI->getOperand(0));
2285 // -A + -B --> -(A + B)
2286 if (Value *LHSV = dyn_castNegVal(LHS)) {
2287 if (LHS->getType()->isIntOrIntVector()) {
2288 if (Value *RHSV = dyn_castNegVal(RHS)) {
2289 Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
2290 return BinaryOperator::CreateNeg(NewAdd);
2294 return BinaryOperator::CreateSub(RHS, LHSV);
2298 if (!isa<Constant>(RHS))
2299 if (Value *V = dyn_castNegVal(RHS))
2300 return BinaryOperator::CreateSub(LHS, V);
2304 if (Value *X = dyn_castFoldableMul(LHS, C2)) {
2305 if (X == RHS) // X*C + X --> X * (C+1)
2306 return BinaryOperator::CreateMul(RHS, AddOne(C2));
2308 // X*C1 + X*C2 --> X * (C1+C2)
2310 if (X == dyn_castFoldableMul(RHS, C1))
2311 return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
2314 // X + X*C --> X * (C+1)
2315 if (dyn_castFoldableMul(RHS, C2) == LHS)
2316 return BinaryOperator::CreateMul(LHS, AddOne(C2));
2318 // X + ~X --> -1 since ~X = -X-1
2319 if (dyn_castNotVal(LHS) == RHS ||
2320 dyn_castNotVal(RHS) == LHS)
2321 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2324 // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
2325 if (match(RHS, m_And(m_Value(), m_ConstantInt(C2))))
2326 if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2)))
2329 // A+B --> A|B iff A and B have no bits set in common.
2330 if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
2331 APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
2332 APInt LHSKnownOne(IT->getBitWidth(), 0);
2333 APInt LHSKnownZero(IT->getBitWidth(), 0);
2334 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
2335 if (LHSKnownZero != 0) {
2336 APInt RHSKnownOne(IT->getBitWidth(), 0);
2337 APInt RHSKnownZero(IT->getBitWidth(), 0);
2338 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
2340 // No bits in common -> bitwise or.
2341 if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
2342 return BinaryOperator::CreateOr(LHS, RHS);
2346 // W*X + Y*Z --> W * (X+Z) iff W == Y
2347 if (I.getType()->isIntOrIntVector()) {
2348 Value *W, *X, *Y, *Z;
2349 if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
2350 match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
2354 } else if (Y == X) {
2356 } else if (X == Z) {
2363 Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
2364 return BinaryOperator::CreateMul(W, NewAdd);
2369 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
2371 if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
2372 return BinaryOperator::CreateSub(SubOne(CRHS), X);
2374 // (X & FF00) + xx00 -> (X+xx00) & FF00
2375 if (LHS->hasOneUse() &&
2376 match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
2377 Constant *Anded = ConstantExpr::getAnd(CRHS, C2);
2378 if (Anded == CRHS) {
2379 // See if all bits from the first bit set in the Add RHS up are included
2380 // in the mask. First, get the rightmost bit.
2381 const APInt& AddRHSV = CRHS->getValue();
2383 // Form a mask of all bits from the lowest bit added through the top.
2384 APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
2386 // See if the and mask includes all of these bits.
2387 APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
2389 if (AddRHSHighBits == AddRHSHighBitsAnd) {
2390 // Okay, the xform is safe. Insert the new add pronto.
2391 Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
2392 return BinaryOperator::CreateAnd(NewAdd, C2);
2397 // Try to fold constant add into select arguments.
2398 if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
2399 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2403 // add (select X 0 (sub n A)) A --> select X A n
2405 SelectInst *SI = dyn_cast<SelectInst>(LHS);
2408 SI = dyn_cast<SelectInst>(RHS);
2411 if (SI && SI->hasOneUse()) {
2412 Value *TV = SI->getTrueValue();
2413 Value *FV = SI->getFalseValue();
2416 // Can we fold the add into the argument of the select?
2417 // We check both true and false select arguments for a matching subtract.
2418 if (match(FV, m_Zero()) &&
2419 match(TV, m_Sub(m_Value(N), m_Specific(A))))
2420 // Fold the add into the true select value.
2421 return SelectInst::Create(SI->getCondition(), N, A);
2422 if (match(TV, m_Zero()) &&
2423 match(FV, m_Sub(m_Value(N), m_Specific(A))))
2424 // Fold the add into the false select value.
2425 return SelectInst::Create(SI->getCondition(), A, N);
2429 // Check for (add (sext x), y), see if we can merge this into an
2430 // integer add followed by a sext.
2431 if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
2432 // (add (sext x), cst) --> (sext (add x, cst'))
2433 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2435 ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
2436 if (LHSConv->hasOneUse() &&
2437 ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
2438 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2439 // Insert the new, smaller add.
2440 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2442 return new SExtInst(NewAdd, I.getType());
2446 // (add (sext x), (sext y)) --> (sext (add int x, y))
2447 if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
2448 // Only do this if x/y have the same type, if at least one of them has a
2449 // single use (so we don't increase the number of sexts), and if the
2450 // integer add will not overflow.
2451 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2452 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2453 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2454 RHSConv->getOperand(0))) {
2455 // Insert the new integer add.
2456 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2457 RHSConv->getOperand(0), "addconv");
2458 return new SExtInst(NewAdd, I.getType());
2463 return Changed ? &I : 0;
2466 Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
2467 bool Changed = SimplifyCommutative(I);
2468 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2470 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2472 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
2473 if (CFP->isExactlyValue(ConstantFP::getNegativeZero
2474 (I.getType())->getValueAPF()))
2475 return ReplaceInstUsesWith(I, LHS);
2478 if (isa<PHINode>(LHS))
2479 if (Instruction *NV = FoldOpIntoPhi(I))
2484 // -A + -B --> -(A + B)
2485 if (Value *LHSV = dyn_castFNegVal(LHS))
2486 return BinaryOperator::CreateFSub(RHS, LHSV);
2489 if (!isa<Constant>(RHS))
2490 if (Value *V = dyn_castFNegVal(RHS))
2491 return BinaryOperator::CreateFSub(LHS, V);
2493 // Check for X+0.0. Simplify it to X if we know X is not -0.0.
2494 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
2495 if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
2496 return ReplaceInstUsesWith(I, LHS);
2498 // Check for (add double (sitofp x), y), see if we can merge this into an
2499 // integer add followed by a promotion.
2500 if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
2501 // (add double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
2502 // ... if the constant fits in the integer value. This is useful for things
2503 // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
2504 // requires a constant pool load, and generally allows the add to be better codegen'd.
2506 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
2508 ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
2509 if (LHSConv->hasOneUse() &&
2510 ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
2511 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2512 // Insert the new integer add.
2513 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2515 return new SIToFPInst(NewAdd, I.getType());
2519 // (add double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
2520 if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
2521 // Only do this if x/y have the same type, if at least one of them has a
2522 // single use (so we don't increase the number of int->fp conversions),
2523 // and if the integer add will not overflow.
2524 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2525 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2526 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2527 RHSConv->getOperand(0))) {
2528 // Insert the new integer add.
2529 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2530 RHSConv->getOperand(0),"addconv");
2531 return new SIToFPInst(NewAdd, I.getType());
2536 return Changed ? &I : 0;
2540 /// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
2541 /// code necessary to compute the offset from the base pointer (without adding
2542 /// in the base pointer). Return the result as a signed integer of intptr size.
2543 static Value *EmitGEPOffset(User *GEP, InstCombiner &IC) {
2544 TargetData &TD = *IC.getTargetData();
2545 gep_type_iterator GTI = gep_type_begin(GEP);
2546 const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
2547 Value *Result = Constant::getNullValue(IntPtrTy);
2549 // Build a mask for high order bits.
2550 unsigned IntPtrWidth = TD.getPointerSizeInBits();
2551 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
2553 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
2556 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
2557 if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
2558 if (OpC->isZero()) continue;
2560 // Handle a struct index, which adds its field offset to the pointer.
2561 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
2562 Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
2564 Result = IC.Builder->CreateAdd(Result,
2565 ConstantInt::get(IntPtrTy, Size),
2566 GEP->getName()+".offs");
2570 Constant *Scale = ConstantInt::get(IntPtrTy, Size);
2572 ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
2573 Scale = ConstantExpr::getMul(OC, Scale);
2574 // Emit an add instruction.
2575 Result = IC.Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
2578 // Convert to correct type.
2579 if (Op->getType() != IntPtrTy)
2580 Op = IC.Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
2582 Constant *Scale = ConstantInt::get(IntPtrTy, Size);
2583 // We'll let instcombine(mul) convert this to a shl if possible.
2584 Op = IC.Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
2587 // Emit an add instruction.
2588 Result = IC.Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
2594 /// EvaluateGEPOffsetExpression - Return a value that can be used to compare
2595 /// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
2596 /// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
2597 /// be complex, and scales are involved. The above expression would also be
2598 /// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
2599 /// This latter form is less amenable to optimization though, and we are allowed
2600 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
2602 /// If we can't emit an optimized form for this expression, this returns null.
2604 static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
2606 TargetData &TD = *IC.getTargetData();
2607 gep_type_iterator GTI = gep_type_begin(GEP);
2609 // Check to see if this gep only has a single variable index. If so, and if
2610 // any constant indices are a multiple of its scale, then we can compute this
2611 // in terms of the scale of the variable index. For example, if the GEP
2612 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
2613 // because the expression will cross zero at the same point.
2614 unsigned i, e = GEP->getNumOperands();
2616 for (i = 1; i != e; ++i, ++GTI) {
2617 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
2618 // Compute the aggregate offset of constant indices.
2619 if (CI->isZero()) continue;
2621 // Handle a struct index, which adds its field offset to the pointer.
2622 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
2623 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
2625 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
2626 Offset += Size*CI->getSExtValue();
2629 // Found our variable index.
2634 // If there are no variable indices, we must have a constant offset, just
2635 // evaluate it the general way.
2636 if (i == e) return 0;
2638 Value *VariableIdx = GEP->getOperand(i);
2639 // Determine the scale factor of the variable element. For example, this is
2640 // 4 if the variable index is into an array of i32.
2641 uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
2643 // Verify that there are no other variable indices. If there are, bail out and let the caller emit the hard way.
2644 for (++i, ++GTI; i != e; ++i, ++GTI) {
2645 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
2648 // Compute the aggregate offset of constant indices.
2649 if (CI->isZero()) continue;
2651 // Handle a struct index, which adds its field offset to the pointer.
2652 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
2653 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
2655 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
2656 Offset += Size*CI->getSExtValue();
2660 // Okay, we know we have a single variable index, which must be a
2661 // pointer/array/vector index. If there is no offset, life is simple, return the variable index.
2663 unsigned IntPtrWidth = TD.getPointerSizeInBits();
2665 // Cast to the intptr type in case a truncation occurs. If an extension is needed,
2666 // we don't need to bother extending: the extension won't affect where the
2667 // computation crosses zero.
2668 if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
2669 VariableIdx = new TruncInst(VariableIdx,
2670 TD.getIntPtrType(VariableIdx->getContext()),
2671 VariableIdx->getName(), &I);
2675 // Otherwise, there is an index. The computation we will do will be modulo
2676 // the pointer size, so get it.
2677 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
2679 Offset &= PtrSizeMask;
2680 VariableScale &= PtrSizeMask;
2682 // To do this transformation, any constant index must be a multiple of the
2683 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
2684 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
2685 // multiple of the variable scale.
2686 int64_t NewOffs = Offset / (int64_t)VariableScale;
2687 if (Offset != NewOffs*(int64_t)VariableScale)
2690 // Okay, we can do this evaluation. Start by converting the index to intptr.
2691 const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
2692 if (VariableIdx->getType() != IntPtrTy)
2693 VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
2695 VariableIdx->getName(), &I);
2696 Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
2697 return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
2701 /// Optimize differences of pointers into the same array into a size. Consider:
2702 /// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
2703 /// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
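/// For example (illustrative): with %p = getelementptr i32* %A, i32 10, the
/// value ptrtoint(%p) - ptrtoint(%A) folds to the GEP's constant byte offset
/// (40 here, for 4-byte i32), and the reversed difference folds to its
/// negation.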
2705 Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
2707 assert(TD && "Must have target data info for this");
2709 // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize this.
2712 GetElementPtrInst *GEP;
2714 if ((GEP = dyn_cast<GetElementPtrInst>(LHS)) &&
2715 GEP->getOperand(0) == RHS)
2717 else if ((GEP = dyn_cast<GetElementPtrInst>(RHS)) &&
2718 GEP->getOperand(0) == LHS)
2723 // TODO: Could also optimize &A[i] - &A[j] -> "i-j".
2725 // Emit the offset of the GEP as an intptr_t.
2726 Value *Result = EmitGEPOffset(GEP, *this);
2728 // If we have p - gep(p, ...) then we have to negate the result.
2730 Result = Builder->CreateNeg(Result, "diff.neg");
2732 return Builder->CreateIntCast(Result, Ty, true);
2736 Instruction *InstCombiner::visitSub(BinaryOperator &I) {
2737 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2739 if (Op0 == Op1) // sub X, X -> 0
2740 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2742 // If this is a 'B = x-(-A)', change to B = x+A.
2743 if (Value *V = dyn_castNegVal(Op1))
2744 return BinaryOperator::CreateAdd(Op0, V);
2746 if (isa<UndefValue>(Op0))
2747 return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
2748 if (isa<UndefValue>(Op1))
2749 return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
2750 if (I.getType() == Type::getInt1Ty(*Context))
2751 return BinaryOperator::CreateXor(Op0, Op1);
2753 if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
2754 // Replace (-1 - A) with (~A).
2755 if (C->isAllOnesValue())
2756 return BinaryOperator::CreateNot(Op1);
2758 // C - ~X == X + (1+C)
2760 if (match(Op1, m_Not(m_Value(X))))
2761 return BinaryOperator::CreateAdd(X, AddOne(C));
2763 // -(X >>u 31) -> (X >>s 31)
2764 // -(X >>s 31) -> (X >>u 31)
2766 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
2767 if (SI->getOpcode() == Instruction::LShr) {
2768 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2769 // Check to see if we are shifting out everything but the sign bit.
2770 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2771 SI->getType()->getPrimitiveSizeInBits()-1) {
2772 // Ok, the transformation is safe. Insert AShr.
2773 return BinaryOperator::Create(Instruction::AShr,
2774 SI->getOperand(0), CU, SI->getName());
2777 } else if (SI->getOpcode() == Instruction::AShr) {
2778 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2779 // Check to see if we are shifting out everything but the sign bit.
2780 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2781 SI->getType()->getPrimitiveSizeInBits()-1) {
2782 // Ok, the transformation is safe. Insert LShr.
2783 return BinaryOperator::CreateLShr(
2784 SI->getOperand(0), CU, SI->getName());
2791 // Try to fold constant sub into select arguments.
2792 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2793 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2796 // C - zext(bool) -> bool ? C - 1 : C
2797 if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
2798 if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
2799 return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
2802 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
2803 if (Op1I->getOpcode() == Instruction::Add) {
2804 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
2805 return BinaryOperator::CreateNeg(Op1I->getOperand(1),
2807 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
2808 return BinaryOperator::CreateNeg(Op1I->getOperand(0),
2810 else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
2811 if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
2812 // C1-(X+C2) --> (C1-C2)-X
2813 return BinaryOperator::CreateSub(
2814 ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0));
2818 if (Op1I->hasOneUse()) {
2819 // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
2820 // is not used by anyone else...
2822 if (Op1I->getOpcode() == Instruction::Sub) {
2823 // Swap the two operands of the subexpr...
2824 Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
2825 Op1I->setOperand(0, IIOp1);
2826 Op1I->setOperand(1, IIOp0);
2828 // Create the new top level add instruction...
2829 return BinaryOperator::CreateAdd(Op0, Op1);
2832 // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
2834 if (Op1I->getOpcode() == Instruction::And &&
2835 (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
2836 Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);
2838 Value *NewNot = Builder->CreateNot(OtherOp, "B.not");
2839 return BinaryOperator::CreateAnd(Op0, NewNot);
2842 // 0 - (X sdiv C) -> (X sdiv -C)
2843 if (Op1I->getOpcode() == Instruction::SDiv)
2844 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
2846 if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
2847 return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
2848 ConstantExpr::getNeg(DivRHS));
2850 // X - X*C --> X * (1-C)
2851 ConstantInt *C2 = 0;
2852 if (dyn_castFoldableMul(Op1I, C2) == Op0) {
2854 ConstantExpr::getSub(ConstantInt::get(I.getType(), 1),
2856 return BinaryOperator::CreateMul(Op0, CP1);
2861 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2862 if (Op0I->getOpcode() == Instruction::Add) {
2863 if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X
2864 return ReplaceInstUsesWith(I, Op0I->getOperand(1));
2865 else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X
2866 return ReplaceInstUsesWith(I, Op0I->getOperand(0));
2867 } else if (Op0I->getOpcode() == Instruction::Sub) {
2868 if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y
2869 return BinaryOperator::CreateNeg(Op0I->getOperand(1),
2875 if (Value *X = dyn_castFoldableMul(Op0, C1)) {
2876 if (X == Op1) // X*C - X --> X * (C-1)
2877 return BinaryOperator::CreateMul(Op1, SubOne(C1));
2879 ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2)
2880 if (X == dyn_castFoldableMul(Op1, C2))
2881 return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
2884 // Optimize differences of pointers into the same array into a size. Consider:
2885 // &A[10] - &A[0]: we should compile this to "10".
2887 if (PtrToIntInst *LHS = dyn_cast<PtrToIntInst>(Op0))
2888 if (PtrToIntInst *RHS = dyn_cast<PtrToIntInst>(Op1))
2889 if (Value *Res = OptimizePointerDifference(LHS->getOperand(0),
2892 return ReplaceInstUsesWith(I, Res);
2894 // trunc(p)-trunc(q) -> trunc(p-q)
2895 if (TruncInst *LHST = dyn_cast<TruncInst>(Op0))
2896 if (TruncInst *RHST = dyn_cast<TruncInst>(Op1))
2897 if (PtrToIntInst *LHS = dyn_cast<PtrToIntInst>(LHST->getOperand(0)))
2898 if (PtrToIntInst *RHS = dyn_cast<PtrToIntInst>(RHST->getOperand(0)))
2899 if (Value *Res = OptimizePointerDifference(LHS->getOperand(0),
2902 return ReplaceInstUsesWith(I, Res);
2908 Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
2909 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2911 // If this is a 'B = x-(-A)', change to B = x+A...
2912 if (Value *V = dyn_castFNegVal(Op1))
2913 return BinaryOperator::CreateFAdd(Op0, V);
2915 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
2916 if (Op1I->getOpcode() == Instruction::FAdd) {
2917 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
2918 return BinaryOperator::CreateFNeg(Op1I->getOperand(1),
2920 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
2921 return BinaryOperator::CreateFNeg(Op1I->getOperand(0),
2929 /// isSignBitCheck - Given an exploded icmp instruction, return true if the
2930 /// comparison only checks the sign bit. If it only checks the sign bit, set
2931 /// TrueIfSigned if the result of the comparison is true when the input value is negative.
2933 static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
2934 bool &TrueIfSigned) {
2936 case ICmpInst::ICMP_SLT: // True if LHS s< 0
2937 TrueIfSigned = true;
2938 return RHS->isZero();
2939 case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
2940 TrueIfSigned = true;
2941 return RHS->isAllOnesValue();
2942 case ICmpInst::ICMP_SGT: // True if LHS s> -1
2943 TrueIfSigned = false;
2944 return RHS->isAllOnesValue();
2945 case ICmpInst::ICMP_UGT:
2946 // True if LHS u> RHS and RHS == high-bit-mask - 1
2947 TrueIfSigned = true;
2948 return RHS->getValue() ==
2949 APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
2950 case ICmpInst::ICMP_UGE:
2951 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
2952 TrueIfSigned = true;
2953 return RHS->getValue().isSignBit();
2959 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
2960 bool Changed = SimplifyCommutative(I);
2961 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2963 if (isa<UndefValue>(Op1)) // undef * X -> 0
2964 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2966 // Simplify mul instructions with a constant RHS.
2967 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
2968 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1C)) {
2970 // ((X << C1)*C2) == (X * (C2 << C1))
2971 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
2972 if (SI->getOpcode() == Instruction::Shl)
2973 if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
2974 return BinaryOperator::CreateMul(SI->getOperand(0),
2975 ConstantExpr::getShl(CI, ShOp));
2978 return ReplaceInstUsesWith(I, Op1C); // X * 0 == 0
2979 if (CI->equalsInt(1)) // X * 1 == X
2980 return ReplaceInstUsesWith(I, Op0);
2981 if (CI->isAllOnesValue()) // X * -1 == 0 - X
2982 return BinaryOperator::CreateNeg(Op0, I.getName());
2984 const APInt& Val = cast<ConstantInt>(CI)->getValue();
2985 if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C
2986 return BinaryOperator::CreateShl(Op0,
2987 ConstantInt::get(Op0->getType(), Val.logBase2()));
2989 } else if (isa<VectorType>(Op1C->getType())) {
2990 if (Op1C->isNullValue())
2991 return ReplaceInstUsesWith(I, Op1C);
2993 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
2994 if (Op1V->isAllOnesValue()) // X * -1 == 0 - X
2995 return BinaryOperator::CreateNeg(Op0, I.getName());
2997 // As above, vector X*splat(1) -> X.
2998 if (Constant *Splat = Op1V->getSplatValue()) {
2999 if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
3000 if (CI->equalsInt(1))
3001 return ReplaceInstUsesWith(I, Op0);
3006 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
3007 if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
3008 isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1C)) {
3009 // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
3010 Value *Add = Builder->CreateMul(Op0I->getOperand(0), Op1C, "tmp");
3011 Value *C1C2 = Builder->CreateMul(Op1C, Op0I->getOperand(1));
3012 return BinaryOperator::CreateAdd(Add, C1C2);
3016 // Try to fold constant mul into select arguments.
3017 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3018 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3021 if (isa<PHINode>(Op0))
3022 if (Instruction *NV = FoldOpIntoPhi(I))
3026 if (Value *Op0v = dyn_castNegVal(Op0)) // -X * -Y = X*Y
3027 if (Value *Op1v = dyn_castNegVal(Op1))
3028 return BinaryOperator::CreateMul(Op0v, Op1v);
3030 // (X / Y) * Y = X - (X % Y)
3031 // (X / Y) * -Y = (X % Y) - X
3034 BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
3036 (BO->getOpcode() != Instruction::UDiv &&
3037 BO->getOpcode() != Instruction::SDiv)) {
3039 BO = dyn_cast<BinaryOperator>(Op1);
3041 Value *Neg = dyn_castNegVal(Op1C);
3042 if (BO && BO->hasOneUse() &&
3043 (BO->getOperand(1) == Op1C || BO->getOperand(1) == Neg) &&
3044 (BO->getOpcode() == Instruction::UDiv ||
3045 BO->getOpcode() == Instruction::SDiv)) {
3046 Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);
3048 // If the division is exact, X % Y is zero.
3049 if (SDivOperator *SDiv = dyn_cast<SDivOperator>(BO))
3050 if (SDiv->isExact()) {
3052 return ReplaceInstUsesWith(I, Op0BO);
3053 return BinaryOperator::CreateNeg(Op0BO);
3057 if (BO->getOpcode() == Instruction::UDiv)
3058 Rem = Builder->CreateURem(Op0BO, Op1BO);
3060 Rem = Builder->CreateSRem(Op0BO, Op1BO);
3064 return BinaryOperator::CreateSub(Op0BO, Rem);
3065 return BinaryOperator::CreateSub(Rem, Op0BO);
3069 /// i1 mul -> i1 and.
3070 if (I.getType() == Type::getInt1Ty(*Context))
3071 return BinaryOperator::CreateAnd(Op0, Op1);
3073 // X*(1 << Y) --> X << Y
3074 // (1 << Y)*X --> X << Y
3077 if (match(Op0, m_Shl(m_One(), m_Value(Y))))
3078 return BinaryOperator::CreateShl(Op1, Y);
3079 if (match(Op1, m_Shl(m_One(), m_Value(Y))))
3080 return BinaryOperator::CreateShl(Op0, Y);
3083 // If one of the operands of the multiply is a cast from a boolean value, then
3084 // we know the bool is either zero or one, so this is a 'masking' multiply.
3085 // X * Y (where Y is 0 or 1) -> X & (0-Y)
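// Illustrative example: for %y = zext i1 %b to i32, the product
// mul i32 %x, %y becomes and i32 %x, (0 - %y), which is %x when %b is true
// and 0 otherwise.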
3086 if (!isa<VectorType>(I.getType())) {
3087 // -2 is "-1 << 1" so it is all bits set except the low one.
3088 APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);
3090 Value *BoolCast = 0, *OtherOp = 0;
3091 if (MaskedValueIsZero(Op0, Negative2))
3092 BoolCast = Op0, OtherOp = Op1;
3093 else if (MaskedValueIsZero(Op1, Negative2))
3094 BoolCast = Op1, OtherOp = Op0;
3097 Value *V = Builder->CreateSub(Constant::getNullValue(I.getType()),
3099 return BinaryOperator::CreateAnd(V, OtherOp);
3103 return Changed ? &I : 0;
3106 Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
3107 bool Changed = SimplifyCommutative(I);
3108 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3110 // Simplify mul instructions with a constant RHS...
3111 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
3112 if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1C)) {
3113 // "In IEEE floating point, x*1 is not equivalent to x for nans. However,
3114 // ANSI says we can drop signals, so we can do this anyway." (from GCC)
3115 if (Op1F->isExactlyValue(1.0))
3116 return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0'
3117 } else if (isa<VectorType>(Op1C->getType())) {
3118 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
3119 // As above, vector X*splat(1.0) -> X in all defined cases.
3120 if (Constant *Splat = Op1V->getSplatValue()) {
3121 if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
3122 if (F->isExactlyValue(1.0))
3123 return ReplaceInstUsesWith(I, Op0);
3128 // Try to fold constant mul into select arguments.
3129 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3130 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3133 if (isa<PHINode>(Op0))
3134 if (Instruction *NV = FoldOpIntoPhi(I))
3138 if (Value *Op0v = dyn_castFNegVal(Op0)) // -X * -Y = X*Y
3139 if (Value *Op1v = dyn_castFNegVal(Op1))
3140 return BinaryOperator::CreateFMul(Op0v, Op1v);
3142 return Changed ? &I : 0;
3145 /// SimplifyDivRemOfSelect - Try to fold a divide or remainder whose divisor is a select instruction.
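/// For example (illustrative): udiv %x, (select %c, i32 0, i32 %y) can use %y
/// as the divisor directly, since selecting the zero arm would make the udiv
/// undefined anyway.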
3147 bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
3148 SelectInst *SI = cast<SelectInst>(I.getOperand(1));
3150 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
3151 int NonNullOperand = -1;
3152 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
3153 if (ST->isNullValue())
3155 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
3156 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
3157 if (ST->isNullValue())
3160 if (NonNullOperand == -1)
3163 Value *SelectCond = SI->getOperand(0);
3165 // Change the div/rem to use 'Y' instead of the select.
3166 I.setOperand(1, SI->getOperand(NonNullOperand));
// Okay, we know we can replace the operand of the div/rem with 'Y' with no
// problem. However, the select, or the condition of the select, may have
// multiple uses. Based on our knowledge that the operand must be non-zero,
// propagate the known value for the select into other uses of it, and
// propagate a known value of the condition into its other users.
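// For example, given "udiv i32 %x, (select i1 %c, i32 %y, i32 0)", the
// divisor must be %y (otherwise it would be zero and the division undefined),
// so %c is known to be true and its other uses in this block can be replaced
// with true as well.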
// If the select and condition only have a single use, don't bother with this;
// just exit early.
3176 if (SI->use_empty() && SelectCond->hasOneUse())
3179 // Scan the current block backward, looking for other uses of SI.
3180 BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();
3182 while (BBI != BBFront) {
3184 // If we found a call to a function, we can't assume it will return, so
3185 // information from below it cannot be propagated above it.
3186 if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
3189 // Replace uses of the select or its condition with the known values.
3190 for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
3193 *I = SI->getOperand(NonNullOperand);
3195 } else if (*I == SelectCond) {
3196 *I = NonNullOperand == 1 ? ConstantInt::getTrue(*Context) :
3197 ConstantInt::getFalse(*Context);
// If we have passed the instruction, quit looking for it.
3205 if (&*BBI == SelectCond)
3208 // If we ran out of things to eliminate, break out of the loop.
3209 if (SelectCond == 0 && SI == 0)
3217 /// This function implements the transforms on div instructions that work
3218 /// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
3219 /// used by the visitors to those instructions.
3220 /// @brief Transforms common to all three div instructions
3221 Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
3222 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3224 // undef / X -> 0 for integer.
3225 // undef / X -> undef for FP (the undef could be a snan).
3226 if (isa<UndefValue>(Op0)) {
3227 if (Op0->getType()->isFPOrFPVector())
3228 return ReplaceInstUsesWith(I, Op0);
3229 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3232 // X / undef -> undef
3233 if (isa<UndefValue>(Op1))
3234 return ReplaceInstUsesWith(I, Op1);
3239 /// This function implements the transforms common to both integer division
3240 /// instructions (udiv and sdiv). It is called by the visitors to those integer
3241 /// division instructions.
3242 /// @brief Common integer divide transforms
3243 Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
3244 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3246 // (sdiv X, X) --> 1 (udiv X, X) --> 1
3248 if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
3249 Constant *CI = ConstantInt::get(Ty->getElementType(), 1);
3250 std::vector<Constant*> Elts(Ty->getNumElements(), CI);
3251 return ReplaceInstUsesWith(I, ConstantVector::get(Elts));
3254 Constant *CI = ConstantInt::get(I.getType(), 1);
3255 return ReplaceInstUsesWith(I, CI);
3258 if (Instruction *Common = commonDivTransforms(I))
3261 // Handle cases involving: [su]div X, (select Cond, Y, Z)
3262 // This does not apply for fdiv.
3263 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
3266 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3268 if (RHS->equalsInt(1))
3269 return ReplaceInstUsesWith(I, Op0);
3271 // (X / C1) / C2 -> X / (C1*C2)
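// For example, (X /u 3) /u 5 --> X /u 15. If C1*C2 overflows the type, the
// quotient is known to be zero.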
3272 if (Instruction *LHS = dyn_cast<Instruction>(Op0))
3273 if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
3274 if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
3275 if (MultiplyOverflows(RHS, LHSRHS,
3276 I.getOpcode()==Instruction::SDiv))
3277 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3279 return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
3280 ConstantExpr::getMul(RHS, LHSRHS));
3283 if (!RHS->isZero()) { // avoid X udiv 0
3284 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3285 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3287 if (isa<PHINode>(Op0))
3288 if (Instruction *NV = FoldOpIntoPhi(I))
3293 // 0 / X == 0, we don't need to preserve faults!
3294 if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
3295 if (LHS->equalsInt(0))
3296 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3298 // It can't be division by zero, hence it must be division by one.
3299 if (I.getType() == Type::getInt1Ty(*Context))
3300 return ReplaceInstUsesWith(I, Op0);
3302 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
3303 if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
3306 return ReplaceInstUsesWith(I, Op0);
3312 Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
3313 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3315 // Handle the integer div common cases
3316 if (Instruction *Common = commonIDivTransforms(I))
3319 if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
// X udiv 2^k -> X >> k
// Check to see if this is an unsigned division by an exact power of 2;
// if so, convert it to a right shift.
3323 if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2
3324 return BinaryOperator::CreateLShr(Op0,
3325 ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
3327 // X udiv C, where C >= signbit
3328 if (C->getValue().isNegative()) {
3329 Value *IC = Builder->CreateICmpULT( Op0, C);
3330 return SelectInst::Create(IC, Constant::getNullValue(I.getType()),
3331 ConstantInt::get(I.getType(), 1));
3335 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
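// For example, X udiv (4 << N) --> X >> (N+2), since 4 << N == 1 << (N+2).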
3336 if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
3337 if (RHSI->getOpcode() == Instruction::Shl &&
3338 isa<ConstantInt>(RHSI->getOperand(0))) {
3339 const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
3340 if (C1.isPowerOf2()) {
3341 Value *N = RHSI->getOperand(1);
3342 const Type *NTy = N->getType();
3343 if (uint32_t C2 = C1.logBase2())
3344 N = Builder->CreateAdd(N, ConstantInt::get(NTy, C2), "tmp");
3345 return BinaryOperator::CreateLShr(Op0, N);
3350 // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
3351 // where C1&C2 are powers of two.
3352 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
3353 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3354 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3355 const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
3356 if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
3357 // Compute the shift amounts
3358 uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
3359 // Construct the "on true" case of the select
3360 Constant *TC = ConstantInt::get(Op0->getType(), TSA);
3361 Value *TSI = Builder->CreateLShr(Op0, TC, SI->getName()+".t");
3363 // Construct the "on false" case of the select
3364 Constant *FC = ConstantInt::get(Op0->getType(), FSA);
3365 Value *FSI = Builder->CreateLShr(Op0, FC, SI->getName()+".f");
3367 // construct the select instruction and return it.
3368 return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
3374 Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
3375 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3377 // Handle the integer div common cases
3378 if (Instruction *Common = commonIDivTransforms(I))
3381 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3383 if (RHS->isAllOnesValue())
3384 return BinaryOperator::CreateNeg(Op0);
3386 // sdiv X, C --> ashr X, log2(C)
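// For example, "sdiv exact X, 8" --> "ashr X, 3"; exactness guarantees there
// is no remainder, so the arithmetic shift gives the same result even for
// negative X.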
3387 if (cast<SDivOperator>(&I)->isExact() &&
3388 RHS->getValue().isNonNegative() &&
3389 RHS->getValue().isPowerOf2()) {
3390 Value *ShAmt = llvm::ConstantInt::get(RHS->getType(),
3391 RHS->getValue().exactLogBase2());
3392 return BinaryOperator::CreateAShr(Op0, ShAmt, I.getName());
3395 // -X/C --> X/-C provided the negation doesn't overflow.
3396 if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
3397 if (isa<Constant>(Sub->getOperand(0)) &&
3398 cast<Constant>(Sub->getOperand(0))->isNullValue() &&
3399 Sub->hasNoSignedWrap())
3400 return BinaryOperator::CreateSDiv(Sub->getOperand(1),
3401 ConstantExpr::getNeg(RHS));
3404 // If the sign bits of both operands are zero (i.e. we can prove they are
3405 // unsigned inputs), turn this into a udiv.
3406 if (I.getType()->isInteger()) {
3407 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3408 if (MaskedValueIsZero(Op0, Mask)) {
3409 if (MaskedValueIsZero(Op1, Mask)) {
3410 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
3411 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
3413 ConstantInt *ShiftedInt;
3414 if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value())) &&
3415 ShiftedInt->getValue().isPowerOf2()) {
3416 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
3417 // Safe because the only negative value (1 << Y) can take on is
3418 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
3419 // the sign bit set.
3420 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
3428 Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
3429 return commonDivTransforms(I);
3432 /// This function implements the transforms on rem instructions that work
3433 /// regardless of the kind of rem instruction it is (urem, srem, or frem). It
3434 /// is used by the visitors to those instructions.
3435 /// @brief Transforms common to all three rem instructions
3436 Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
3437 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3439 if (isa<UndefValue>(Op0)) { // undef % X -> 0
3440 if (I.getType()->isFPOrFPVector())
3441 return ReplaceInstUsesWith(I, Op0); // X % undef -> undef (could be SNaN)
3442 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3444 if (isa<UndefValue>(Op1))
3445 return ReplaceInstUsesWith(I, Op1); // X % undef -> undef
3447 // Handle cases involving: rem X, (select Cond, Y, Z)
3448 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
3454 /// This function implements the transforms common to both integer remainder
3455 /// instructions (urem and srem). It is called by the visitors to those integer
3456 /// remainder instructions.
3457 /// @brief Common integer remainder transforms
3458 Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
3459 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3461 if (Instruction *common = commonRemTransforms(I))
3464 // 0 % X == 0 for integer, we don't need to preserve faults!
3465 if (Constant *LHS = dyn_cast<Constant>(Op0))
3466 if (LHS->isNullValue())
3467 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3469 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3470 // X % 0 == undef, we don't need to preserve faults!
3471 if (RHS->equalsInt(0))
3472 return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
3474 if (RHS->equalsInt(1)) // X % 1 == 0
3475 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3477 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
3478 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
3479 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3481 } else if (isa<PHINode>(Op0I)) {
3482 if (Instruction *NV = FoldOpIntoPhi(I))
3486 // See if we can fold away this rem instruction.
3487 if (SimplifyDemandedInstructionBits(I))
3495 Instruction *InstCombiner::visitURem(BinaryOperator &I) {
3496 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3498 if (Instruction *common = commonIRemTransforms(I))
3501 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
// X urem 2^k -> X & (2^k - 1)
// Check to see if this is an unsigned remainder by an exact power of 2;
// if so, convert it to a bitwise and.
3505 if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
3506 if (C->getValue().isPowerOf2())
3507 return BinaryOperator::CreateAnd(Op0, SubOne(C));
3510 if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
3511 // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
3512 if (RHSI->getOpcode() == Instruction::Shl &&
3513 isa<ConstantInt>(RHSI->getOperand(0))) {
3514 if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
3515 Constant *N1 = Constant::getAllOnesValue(I.getType());
3516 Value *Add = Builder->CreateAdd(RHSI, N1, "tmp");
3517 return BinaryOperator::CreateAnd(Op0, Add);
3522 // urem X, (select Cond, 2^C1, 2^C2) --> select Cond, (and X, C1), (and X, C2)
3523 // where C1&C2 are powers of two.
3524 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
3525 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3526 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3527 // STO == 0 and SFO == 0 handled above.
3528 if ((STO->getValue().isPowerOf2()) &&
3529 (SFO->getValue().isPowerOf2())) {
3530 Value *TrueAnd = Builder->CreateAnd(Op0, SubOne(STO),
3531 SI->getName()+".t");
3532 Value *FalseAnd = Builder->CreateAnd(Op0, SubOne(SFO),
3533 SI->getName()+".f");
3534 return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
3542 Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
3543 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3545 // Handle the integer rem common cases
3546 if (Instruction *Common = commonIRemTransforms(I))
3549 if (Value *RHSNeg = dyn_castNegVal(Op1))
3550 if (!isa<Constant>(RHSNeg) ||
3551 (isa<ConstantInt>(RHSNeg) &&
3552 cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
3554 Worklist.AddValue(I.getOperand(1));
3555 I.setOperand(1, RHSNeg);
3559 // If the sign bits of both operands are zero (i.e. we can prove they are
3560 // unsigned inputs), turn this into a urem.
3561 if (I.getType()->isInteger()) {
3562 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3563 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
3564 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
3565 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
3569 // If it's a constant vector, flip any negative values positive.
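// (X srem -C == X srem C because the sign of an srem result follows the
// dividend, not the divisor.)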
3570 if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
3571 unsigned VWidth = RHSV->getNumOperands();
3573 bool hasNegative = false;
3574 for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
3575 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
3576 if (RHS->getValue().isNegative())
3580 std::vector<Constant *> Elts(VWidth);
3581 for (unsigned i = 0; i != VWidth; ++i) {
3582 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
3583 if (RHS->getValue().isNegative())
3584 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
3590 Constant *NewRHSV = ConstantVector::get(Elts);
3591 if (NewRHSV != RHSV) {
3592 Worklist.AddValue(I.getOperand(1));
3593 I.setOperand(1, NewRHSV);
3602 Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
3603 return commonRemTransforms(I);
// isOneBitSet - Return true if there is exactly one bit set in the
// specified constant.
3608 static bool isOneBitSet(const ConstantInt *CI) {
3609 return CI->getValue().isPowerOf2();
3612 // isHighOnes - Return true if the constant is of the form 1+0+.
3613 // This is the same as lowones(~X).
3614 static bool isHighOnes(const ConstantInt *CI) {
3615 return (~CI->getValue() + 1).isPowerOf2();
/// getICmpCode - Encode an icmp predicate into a three-bit mask. These bits
/// are carefully arranged to allow folding of expressions such as:
3621 /// (A < B) | (A > B) --> (A != B)
3623 /// Note that this is only valid if the first and second predicates have the
/// same sign. It is illegal to do: (A u< B) | (A s> B)
3626 /// Three bits are used to represent the condition, as follows:
///   <=>  Value  Definition
///   000    0    Always false
///   001    1    A >  B
///   010    2    A == B
///   011    3    A >= B
///   100    4    A <  B
///   101    5    A != B
///   110    6    A <= B
///   111    7    Always true
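///
/// For example, (A u< B) has code 100 and (A u> B) has code 001; or'ing the
/// codes gives 101, which decodes to (A != B).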
3641 static unsigned getICmpCode(const ICmpInst *ICI) {
3642 switch (ICI->getPredicate()) {
3644 case ICmpInst::ICMP_UGT: return 1; // 001
3645 case ICmpInst::ICMP_SGT: return 1; // 001
3646 case ICmpInst::ICMP_EQ: return 2; // 010
3647 case ICmpInst::ICMP_UGE: return 3; // 011
3648 case ICmpInst::ICMP_SGE: return 3; // 011
3649 case ICmpInst::ICMP_ULT: return 4; // 100
3650 case ICmpInst::ICMP_SLT: return 4; // 100
3651 case ICmpInst::ICMP_NE: return 5; // 101
3652 case ICmpInst::ICMP_ULE: return 6; // 110
3653 case ICmpInst::ICMP_SLE: return 6; // 110
3656 llvm_unreachable("Invalid ICmp predicate!");
3661 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
3662 /// predicate into a three bit mask. It also returns whether it is an ordered
3663 /// predicate by reference.
3664 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
3667 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
3668 case FCmpInst::FCMP_UNO: return 0; // 000
3669 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
3670 case FCmpInst::FCMP_UGT: return 1; // 001
3671 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
3672 case FCmpInst::FCMP_UEQ: return 2; // 010
3673 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
3674 case FCmpInst::FCMP_UGE: return 3; // 011
3675 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
3676 case FCmpInst::FCMP_ULT: return 4; // 100
3677 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
3678 case FCmpInst::FCMP_UNE: return 5; // 101
3679 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
3680 case FCmpInst::FCMP_ULE: return 6; // 110
// Not expecting FCMP_FALSE and FCMP_TRUE.
3684 llvm_unreachable("Unexpected FCmp predicate!");
3689 /// getICmpValue - This is the complement of getICmpCode, which turns an
3690 /// opcode and two operands into either a constant true or false, or a brand
3691 /// new ICmp instruction. The sign is passed in to determine which kind
3692 /// of predicate to use in the new icmp instruction.
3693 static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
3694 LLVMContext *Context) {
3696 default: llvm_unreachable("Illegal ICmp code!");
3697 case 0: return ConstantInt::getFalse(*Context);
3700 return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS);
3702 return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS);
3703 case 2: return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS);
3706 return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS);
3708 return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS);
3711 return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS);
3713 return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS);
3714 case 5: return new ICmpInst(ICmpInst::ICMP_NE, LHS, RHS);
3717 return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS);
3719 return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS);
3720 case 7: return ConstantInt::getTrue(*Context);
/// getFCmpValue - This is the complement of getFCmpCode, which turns a
/// code and two operands into either a constant true or a new FCmp
/// instruction. isordered is passed in to determine which kind of predicate
/// to use in the new fcmp instruction.
3727 static Value *getFCmpValue(bool isordered, unsigned code,
3728 Value *LHS, Value *RHS, LLVMContext *Context) {
3730 default: llvm_unreachable("Illegal FCmp code!");
3733 return new FCmpInst(FCmpInst::FCMP_ORD, LHS, RHS);
3735 return new FCmpInst(FCmpInst::FCMP_UNO, LHS, RHS);
3738 return new FCmpInst(FCmpInst::FCMP_OGT, LHS, RHS);
3740 return new FCmpInst(FCmpInst::FCMP_UGT, LHS, RHS);
3743 return new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS);
3745 return new FCmpInst(FCmpInst::FCMP_UEQ, LHS, RHS);
3748 return new FCmpInst(FCmpInst::FCMP_OGE, LHS, RHS);
3750 return new FCmpInst(FCmpInst::FCMP_UGE, LHS, RHS);
3753 return new FCmpInst(FCmpInst::FCMP_OLT, LHS, RHS);
3755 return new FCmpInst(FCmpInst::FCMP_ULT, LHS, RHS);
3758 return new FCmpInst(FCmpInst::FCMP_ONE, LHS, RHS);
3760 return new FCmpInst(FCmpInst::FCMP_UNE, LHS, RHS);
3763 return new FCmpInst(FCmpInst::FCMP_OLE, LHS, RHS);
3765 return new FCmpInst(FCmpInst::FCMP_ULE, LHS, RHS);
3766 case 7: return ConstantInt::getTrue(*Context);
3770 /// PredicatesFoldable - Return true if both predicates match sign or if at
3771 /// least one of them is an equality comparison (which is signless).
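/// For example, (slt, sgt) and (eq, sgt) are foldable, but (ult, sgt) is not.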
3772 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
3773 return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
3774 (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
3775 (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
3779 // FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3780 struct FoldICmpLogical {
3783 ICmpInst::Predicate pred;
3784 FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
3785 : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
3786 pred(ICI->getPredicate()) {}
3787 bool shouldApply(Value *V) const {
3788 if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
3789 if (PredicatesFoldable(pred, ICI->getPredicate()))
3790 return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
3791 (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
3794 Instruction *apply(Instruction &Log) const {
3795 ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
3796 if (ICI->getOperand(0) != LHS) {
3797 assert(ICI->getOperand(1) == LHS);
3798 ICI->swapOperands(); // Swap the LHS and RHS of the ICmp
3801 ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
3802 unsigned LHSCode = getICmpCode(ICI);
3803 unsigned RHSCode = getICmpCode(RHSICI);
3805 switch (Log.getOpcode()) {
3806 case Instruction::And: Code = LHSCode & RHSCode; break;
3807 case Instruction::Or: Code = LHSCode | RHSCode; break;
3808 case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
3809 default: llvm_unreachable("Illegal logical opcode!"); return 0;
3812 bool isSigned = RHSICI->isSigned() || ICI->isSigned();
3813 Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext());
3814 if (Instruction *I = dyn_cast<Instruction>(RV))
3816 // Otherwise, it's a constant boolean value...
3817 return IC.ReplaceInstUsesWith(Log, RV);
3820 } // end anonymous namespace
// OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
// guaranteed to be a binary operator.
3825 Instruction *InstCombiner::OptAndOp(Instruction *Op,
3827 ConstantInt *AndRHS,
3828 BinaryOperator &TheAnd) {
3829 Value *X = Op->getOperand(0);
3830 Constant *Together = 0;
3832 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
3834 switch (Op->getOpcode()) {
3835 case Instruction::Xor:
3836 if (Op->hasOneUse()) {
3837 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
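// For example, (X ^ 0xFF) & 0x0F --> (X & 0x0F) ^ 0x0F.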
3838 Value *And = Builder->CreateAnd(X, AndRHS);
3840 return BinaryOperator::CreateXor(And, Together);
3843 case Instruction::Or:
3844 if (Together == AndRHS) // (X | C) & C --> C
3845 return ReplaceInstUsesWith(TheAnd, AndRHS);
3847 if (Op->hasOneUse() && Together != OpRHS) {
3848 // (X | C1) & C2 --> (X | (C1&C2)) & C2
3849 Value *Or = Builder->CreateOr(X, Together);
3851 return BinaryOperator::CreateAnd(Or, AndRHS);
3854 case Instruction::Add:
3855 if (Op->hasOneUse()) {
// Adding one to a single-bit bit-field should be turned into an XOR
// of that bit. The first thing to check is whether this AND is with a
// single-bit constant.
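// For example, (X + 4) & 4 --> (X & 4) ^ 4, and (X + 8) & 4 --> X & 4.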
3859 const APInt& AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
3861 // If there is only one bit set...
3862 if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
3863 // Ok, at this point, we know that we are masking the result of the
3864 // ADD down to exactly one bit. If the constant we are adding has
3865 // no bits set below this bit, then we can eliminate the ADD.
3866 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
3868 // Check to see if any bits below the one bit set in AndRHSV are set.
3869 if ((AddRHS & (AndRHSV-1)) == 0) {
// If not, the only thing that can affect the output of the AND is
// the bit specified by AndRHSV. If that bit of the add constant is set,
// the effect of the ADD is to toggle the bit. If it is clear, then the
// ADD has no effect on that bit.
3874 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
3875 TheAnd.setOperand(0, X);
3878 // Pull the XOR out of the AND.
3879 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
3880 NewAnd->takeName(Op);
3881 return BinaryOperator::CreateXor(NewAnd, AndRHS);
3888 case Instruction::Shl: {
3889 // We know that the AND will not produce any of the bits shifted in, so if
3890 // the anded constant includes them, clear them now!
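// For example, with i32 operands, (X << 4) & 0xFF becomes (X << 4) & 0xF0,
// and (X << 4) & 0xFFFFFFF0 becomes just X << 4.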
3892 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3893 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3894 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
3895 ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShlMask);
3897 if (CI->getValue() == ShlMask) {
3898 // Masking out bits that the shift already masks
3899 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
3900 } else if (CI != AndRHS) { // Reducing bits set in and.
3901 TheAnd.setOperand(1, CI);
3906 case Instruction::LShr:
3908 // We know that the AND will not produce any of the bits shifted in, so if
3909 // the anded constant includes them, clear them now! This only applies to
3910 // unsigned shifts, because a signed shr may bring in set bits!
3912 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3913 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3914 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
3915 ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
3917 if (CI->getValue() == ShrMask) {
3918 // Masking out bits that the shift already masks.
3919 return ReplaceInstUsesWith(TheAnd, Op);
3920 } else if (CI != AndRHS) {
3921 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
3926 case Instruction::AShr:
// See if this is shifting in some sign extension, then masking it out
// with an and.
3930 if (Op->hasOneUse()) {
3931 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3932 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3933 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
3934 Constant *C = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
3935 if (C == AndRHS) { // Masking out bits shifted in.
3936 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
3937 // Make the argument unsigned.
3938 Value *ShVal = Op->getOperand(0);
3939 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
3940 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
3949 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
/// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
/// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
/// whether to treat V, Lo and Hi as signed or not. IB is the location to
3953 /// insert new instructions.
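/// For example, with Lo == 10 and Hi == 20, the inside test becomes
/// (V-10) u< 10 and the outside test becomes (V-10) u> 9.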
3954 Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
3955 bool isSigned, bool Inside,
3957 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
3958 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
3959 "Lo is not <= Hi in range emission code!");
3962 if (Lo == Hi) // Trivially false.
3963 return new ICmpInst(ICmpInst::ICMP_NE, V, V);
3965 // V >= Min && V < Hi --> V < Hi
3966 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3967 ICmpInst::Predicate pred = (isSigned ?
3968 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
3969 return new ICmpInst(pred, V, Hi);
3972 // Emit V-Lo <u Hi-Lo
3973 Constant *NegLo = ConstantExpr::getNeg(Lo);
3974 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
3975 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
3976 return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound);
3979 if (Lo == Hi) // Trivially true.
3980 return new ICmpInst(ICmpInst::ICMP_EQ, V, V);
3982 // V < Min || V >= Hi -> V > Hi-1
3983 Hi = SubOne(cast<ConstantInt>(Hi));
3984 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3985 ICmpInst::Predicate pred = (isSigned ?
3986 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
3987 return new ICmpInst(pred, V, Hi);
3990 // Emit V-Lo >u Hi-1-Lo
3991 // Note that Hi has already had one subtracted from it, above.
3992 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
3993 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
3994 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
3995 return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound);
3998 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
3999 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
4000 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
4001 // not, since all 1s are not contiguous.
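// For a non-wrapping run such as 0x00FF0000, MB and ME are set to the 1-based
// positions of the lowest and highest set bits (17 and 24 here), inclusive.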
4002 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
4003 const APInt& V = Val->getValue();
4004 uint32_t BitWidth = Val->getType()->getBitWidth();
4005 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
4007 // look for the first zero bit after the run of ones
4008 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
4009 // look for the first non-zero bit
4010 ME = V.getActiveBits();
4014 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
4015 /// where isSub determines whether the operator is a sub. If we can fold one of
4016 /// the following xforms:
4018 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
4019 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
4020 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
4022 /// return (A +/- B).
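///
/// For example, ((A & 0x0F) + B) & 0x0F --> (A + B) & 0x0F, since the masked
/// high bits of A cannot influence the low four bits of the sum.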
4024 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
4025 ConstantInt *Mask, bool isSub,
4027 Instruction *LHSI = dyn_cast<Instruction>(LHS);
4028 if (!LHSI || LHSI->getNumOperands() != 2 ||
4029 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
4031 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
4033 switch (LHSI->getOpcode()) {
4035 case Instruction::And:
4036 if (ConstantExpr::getAnd(N, Mask) == Mask) {
4037 // If the AndRHS is a power of two minus one (0+1+), this is simple.
4038 if ((Mask->getValue().countLeadingZeros() +
4039 Mask->getValue().countPopulation()) ==
4040 Mask->getValue().getBitWidth())
4043 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
4044 // part, we don't need any explicit masks to take them out of A. If that
4045 // is all N is, ignore it.
4046 uint32_t MB = 0, ME = 0;
4047 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
4048 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
4049 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
4050 if (MaskedValueIsZero(RHS, Mask))
4055 case Instruction::Or:
4056 case Instruction::Xor:
4057 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
4058 if ((Mask->getValue().countLeadingZeros() +
4059 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
4060 && ConstantExpr::getAnd(N, Mask)->isNullValue())
4066 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
4067 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
4070 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
4071 Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
4072 ICmpInst *LHS, ICmpInst *RHS) {
4074 ConstantInt *LHSCst, *RHSCst;
4075 ICmpInst::Predicate LHSCC, RHSCC;
4077 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
4078 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
4079 m_ConstantInt(LHSCst))) ||
4080 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
4081 m_ConstantInt(RHSCst))))
4084 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
4085 // where C is a power of 2
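// For example, (A u< 8) & (B u< 8) --> (A|B) u< 8: both compares hold exactly
// when neither value has any bit at or above bit 3 set.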
4086 if (LHSCst == RHSCst && LHSCC == RHSCC && LHSCC == ICmpInst::ICMP_ULT &&
4087 LHSCst->getValue().isPowerOf2()) {
4088 Value *NewOr = Builder->CreateOr(Val, Val2);
4089 return new ICmpInst(LHSCC, NewOr, LHSCst);
4092 // From here on, we only handle:
4093 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
4094 if (Val != Val2) return 0;
4096 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
4097 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
4098 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
4099 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
4100 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
4103 // We can't fold (ugt x, C) & (sgt x, C2).
4104 if (!PredicatesFoldable(LHSCC, RHSCC))
4107 // Ensure that the larger constant is on the RHS.
4109 if (CmpInst::isSigned(LHSCC) ||
4110 (ICmpInst::isEquality(LHSCC) &&
4111 CmpInst::isSigned(RHSCC)))
4112 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
4114 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
4117 std::swap(LHS, RHS);
4118 std::swap(LHSCst, RHSCst);
4119 std::swap(LHSCC, RHSCC);
// At this point, we know we have two icmp instructions
// comparing a value against two constants and and'ing the result
// together. Because of the above check, we know that we only have
// icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
// (from the FoldICmpLogical check above) that the two constants
// are not equal and that the larger constant is on the RHS.
4128 assert(LHSCst != RHSCst && "Compares not folded above?");
4131 default: llvm_unreachable("Unknown integer condition code!");
4132 case ICmpInst::ICMP_EQ:
4134 default: llvm_unreachable("Unknown integer condition code!");
4135 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
4136 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
4137 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
4138 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4139 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
4140 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
4141 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
4142 return ReplaceInstUsesWith(I, LHS);
4144 case ICmpInst::ICMP_NE:
4146 default: llvm_unreachable("Unknown integer condition code!");
4147 case ICmpInst::ICMP_ULT:
4148 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
4149 return new ICmpInst(ICmpInst::ICMP_ULT, Val, LHSCst);
4150 break; // (X != 13 & X u< 15) -> no change
4151 case ICmpInst::ICMP_SLT:
4152 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
4153 return new ICmpInst(ICmpInst::ICMP_SLT, Val, LHSCst);
4154 break; // (X != 13 & X s< 15) -> no change
4155 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
4156 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
4157 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
4158 return ReplaceInstUsesWith(I, RHS);
4159 case ICmpInst::ICMP_NE:
4160 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
4161 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
4162 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
4163 return new ICmpInst(ICmpInst::ICMP_UGT, Add,
4164 ConstantInt::get(Add->getType(), 1));
4166 break; // (X != 13 & X != 15) -> no change
4169 case ICmpInst::ICMP_ULT:
4171 default: llvm_unreachable("Unknown integer condition code!");
4172 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
4173 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
4174 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4175 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
4177 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
4178 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
4179 return ReplaceInstUsesWith(I, LHS);
4180 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
4184 case ICmpInst::ICMP_SLT:
4186 default: llvm_unreachable("Unknown integer condition code!");
4187 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
4188 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
4189 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4190 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
4192 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
4193 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
4194 return ReplaceInstUsesWith(I, LHS);
4195 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
4199 case ICmpInst::ICMP_UGT:
4201 default: llvm_unreachable("Unknown integer condition code!");
4202 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
4203 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
4204 return ReplaceInstUsesWith(I, RHS);
4205 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
4207 case ICmpInst::ICMP_NE:
4208 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
4209 return new ICmpInst(LHSCC, Val, RHSCst);
4210 break; // (X u> 13 & X != 15) -> no change
4211 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
4212 return InsertRangeTest(Val, AddOne(LHSCst),
4213 RHSCst, false, true, I);
4214 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
4218 case ICmpInst::ICMP_SGT:
4220 default: llvm_unreachable("Unknown integer condition code!");
4221 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
4222 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
4223 return ReplaceInstUsesWith(I, RHS);
4224 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
4226 case ICmpInst::ICMP_NE:
4227 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
4228 return new ICmpInst(LHSCC, Val, RHSCst);
4229 break; // (X s> 13 & X != 15) -> no change
4230 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
4231 return InsertRangeTest(Val, AddOne(LHSCst),
4232 RHSCst, true, true, I);
4233 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
4242 Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS,
4245 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
4246 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
4247 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
4248 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4249 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
// If either of the constants is a NaN, then the whole thing returns false.
4252 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
4253 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4254 return new FCmpInst(FCmpInst::FCMP_ORD,
4255 LHS->getOperand(0), RHS->getOperand(0));
4258 // Handle vector zeros. This occurs because the canonical form of
4259 // "fcmp ord x,x" is "fcmp ord x, 0".
4260 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
4261 isa<ConstantAggregateZero>(RHS->getOperand(1)))
4262 return new FCmpInst(FCmpInst::FCMP_ORD,
4263 LHS->getOperand(0), RHS->getOperand(0));
4267 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
4268 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
4269 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
4272 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
4273 // Swap RHS operands to match LHS.
4274 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
4275 std::swap(Op1LHS, Op1RHS);
4278 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
4279 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
4281 return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
4283 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
4284 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4285 if (Op0CC == FCmpInst::FCMP_TRUE)
4286 return ReplaceInstUsesWith(I, RHS);
4287 if (Op1CC == FCmpInst::FCMP_TRUE)
4288 return ReplaceInstUsesWith(I, LHS);
4292 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
4293 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
4295 std::swap(LHS, RHS);
4296 std::swap(Op0Pred, Op1Pred);
4297 std::swap(Op0Ordered, Op1Ordered);
4300 // uno && ueq -> uno && (uno || eq) -> ueq
4301 // ord && olt -> ord && (ord && lt) -> olt
4302 if (Op0Ordered == Op1Ordered)
4303 return ReplaceInstUsesWith(I, RHS);
4305 // uno && oeq -> uno && (ord && eq) -> false
4306 // uno && ord -> false
4308 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4309 // ord && ueq -> ord && (uno || eq) -> oeq
4310 return cast<Instruction>(getFCmpValue(true, Op1Pred,
4311 Op0LHS, Op0RHS, Context));
4319 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
4320 bool Changed = SimplifyCommutative(I);
4321 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4323 if (Value *V = SimplifyAndInst(Op0, Op1, TD))
4324 return ReplaceInstUsesWith(I, V);
4327 // See if we can simplify any instructions used by the instruction whose sole
4328 // purpose is to compute bits we don't care about.
4329 if (SimplifyDemandedInstructionBits(I))
4333 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
4334 const APInt &AndRHSMask = AndRHS->getValue();
4335 APInt NotAndRHS(~AndRHSMask);
4337 // Optimize a variety of ((val OP C1) & C2) combinations...
4338 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
4339 Value *Op0LHS = Op0I->getOperand(0);
4340 Value *Op0RHS = Op0I->getOperand(1);
4341 switch (Op0I->getOpcode()) {
4343 case Instruction::Xor:
4344 case Instruction::Or:
4345 // If the mask is only needed on one incoming arm, push it up.
4346 if (!Op0I->hasOneUse()) break;
4348 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
4349 // Not masking anything out for the LHS, move to RHS.
4350 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
4351 Op0RHS->getName()+".masked");
4352 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
4354 if (!isa<Constant>(Op0RHS) &&
4355 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
4356 // Not masking anything out for the RHS, move to LHS.
4357 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
4358 Op0LHS->getName()+".masked");
4359 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
4363 case Instruction::Add:
4364 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
4365 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4366 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4367 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
4368 return BinaryOperator::CreateAnd(V, AndRHS);
4369 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
4370 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
4373 case Instruction::Sub:
4374 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
4375 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4376 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4377 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
4378 return BinaryOperator::CreateAnd(V, AndRHS);
4380 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
4381 // has 1's for all bits that the subtraction with A might affect.
4382 if (Op0I->hasOneUse()) {
4383 uint32_t BitWidth = AndRHSMask.getBitWidth();
4384 uint32_t Zeros = AndRHSMask.countLeadingZeros();
4385 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
4387 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
4388 if (!(A && A->isZero()) && // avoid infinite recursion.
4389 MaskedValueIsZero(Op0LHS, Mask)) {
4390 Value *NewNeg = Builder->CreateNeg(Op0RHS);
4391 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
4396 case Instruction::Shl:
4397 case Instruction::LShr:
4398 // (1 << x) & 1 --> zext(x == 0)
4399 // (1 >> x) & 1 --> zext(x == 0)
4400 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
4402 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
4403 return new ZExtInst(NewICmp, I.getType());
4408 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
4409 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
4411 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
4412 // If this is an integer truncation or change from signed-to-unsigned, and
4413 // if the source is an and/or with immediate, transform it. This
4414 // frequently occurs for bitfield accesses.
4415 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
4416 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
4417 CastOp->getNumOperands() == 2)
4418 if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
4419 if (CastOp->getOpcode() == Instruction::And) {
4420 // Change: and (cast (and X, C1) to T), C2
4421 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
4422 // This will fold the two constants together, which may allow
4423 // other simplifications.
4424 Value *NewCast = Builder->CreateTruncOrBitCast(
4425 CastOp->getOperand(0), I.getType(),
4426 CastOp->getName()+".shrunk");
4427 // trunc_or_bitcast(C1)&C2
4428 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
4429 C3 = ConstantExpr::getAnd(C3, AndRHS);
4430 return BinaryOperator::CreateAnd(NewCast, C3);
4431 } else if (CastOp->getOpcode() == Instruction::Or) {
4432 // Change: and (cast (or X, C1) to T), C2
4433 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
4434 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
4435 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
4437 return ReplaceInstUsesWith(I, AndRHS);
4443 // Try to fold constant and into select arguments.
4444 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4445 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4447 if (isa<PHINode>(Op0))
4448 if (Instruction *NV = FoldOpIntoPhi(I))
4453 // (~A & ~B) == (~(A | B)) - De Morgan's Law
4454 if (Value *Op0NotVal = dyn_castNotVal(Op0))
4455 if (Value *Op1NotVal = dyn_castNotVal(Op1))
4456 if (Op0->hasOneUse() && Op1->hasOneUse()) {
4457 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
4458 I.getName()+".demorgan");
4459 return BinaryOperator::CreateNot(Or);
4463 Value *A = 0, *B = 0, *C = 0, *D = 0;
4464 // (A|B) & ~(A&B) -> A^B
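// (Per bit, the OR is 1 and the AND is 0 exactly when the two bits differ,
// which is XOR.)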
4465 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
4466 match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
4467 ((A == C && B == D) || (A == D && B == C)))
4468 return BinaryOperator::CreateXor(A, B);
4470 // ~(A&B) & (A|B) -> A^B
4471 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
4472 match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
4473 ((A == C && B == D) || (A == D && B == C)))
4474 return BinaryOperator::CreateXor(A, B);
4476 if (Op0->hasOneUse() &&
4477 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4478 if (A == Op1) { // (A^B)&A -> A&(A^B)
4479 I.swapOperands(); // Simplify below
4480 std::swap(Op0, Op1);
4481 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
4482 cast<BinaryOperator>(Op0)->swapOperands();
4483 I.swapOperands(); // Simplify below
4484 std::swap(Op0, Op1);
4488 if (Op1->hasOneUse() &&
4489 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
4490 if (B == Op0) { // B&(A^B) -> B&(B^A)
4491 cast<BinaryOperator>(Op1)->swapOperands();
4494 if (A == Op0) // A&(A^B) -> A & ~B
4495 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
4498 // (A&((~A)|B)) -> A&B
4499 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
4500 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
4501 return BinaryOperator::CreateAnd(A, Op1);
4502 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
4503 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
4504 return BinaryOperator::CreateAnd(A, Op0);
4507 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1)) {
4508 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
4509 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
4512 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
4513 if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
4517 // fold (and (cast A), (cast B)) -> (cast (and A, B))
4518 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
4519 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4520 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
4521 const Type *SrcTy = Op0C->getOperand(0)->getType();
4522 if (SrcTy == Op1C->getOperand(0)->getType() &&
4523 SrcTy->isIntOrIntVector() &&
4524 // Only do this if the casts both really cause code to be generated.
4525 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4527 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4529 Value *NewOp = Builder->CreateAnd(Op0C->getOperand(0),
4530 Op1C->getOperand(0), I.getName());
4531 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
4535 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
4536 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4537 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4538 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4539 SI0->getOperand(1) == SI1->getOperand(1) &&
4540 (SI0->hasOneUse() || SI1->hasOneUse())) {
4542 Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
4544 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
4545 SI1->getOperand(1));
4549 // If and'ing two fcmp, try combine them into one.
4550 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4551 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
4552 if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
4556 return Changed ? &I : 0;
4559 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
4560 /// capable of providing pieces of a bswap. The subexpression provides pieces
4561 /// of a bswap if it is proven that each of the non-zero bytes in the output of
4562 /// the expression came from the corresponding "byte swapped" byte in some other
4563 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
4564 /// we know that the expression deposits the low byte of %X into the high byte
4565 /// of the bswap result and that all other bytes are zero. This expression is
/// accepted, the high byte of ByteValues is set to X to indicate a correct
/// match.
///
/// This function returns true if the match was unsuccessful and false if it
/// succeeded.
4570 /// On entry to the function the "OverallLeftShift" is a signed integer value
4571 /// indicating the number of bytes that the subexpression is later shifted. For
4572 /// example, if the expression is later right shifted by 16 bits, the
4573 /// OverallLeftShift value would be -2 on entry. This is used to specify which
4574 /// byte of ByteValues is actually being set.
4576 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
4577 /// byte is masked to zero by a user. For example, in (X & 255), X will be
4578 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
4579 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
4580 /// always in the local (OverallLeftShift) coordinate space.
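///
/// For example, for an i32 bswap the subexpression (lshr i32 %X, 24) provides
/// byte 3 of %X as byte 0 of the result and leaves the other bytes zero.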
4582 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
4583 SmallVector<Value*, 8> &ByteValues) {
4584 if (Instruction *I = dyn_cast<Instruction>(V)) {
4585 // If this is an or instruction, it may be an inner node of the bswap.
4586 if (I->getOpcode() == Instruction::Or) {
4587 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4589 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
4593 // If this is a logical shift by a constant multiple of 8, recurse with
4594 // OverallLeftShift and ByteMask adjusted.
4595 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
4597 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
// Ensure the shift amount is a whole multiple of 8 bits and within range.
4599 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
4602 unsigned ByteShift = ShAmt >> 3;
4603 if (I->getOpcode() == Instruction::Shl) {
4604 // X << 2 -> collect(X, +2)
4605 OverallLeftShift += ByteShift;
4606 ByteMask >>= ByteShift;
4608 // X >>u 2 -> collect(X, -2)
4609 OverallLeftShift -= ByteShift;
4610 ByteMask <<= ByteShift;
4611 ByteMask &= (~0U >> (32-ByteValues.size()));
4614 if (OverallLeftShift >= (int)ByteValues.size()) return true;
4615 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
4617 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4621 // If this is a logical 'and' with a mask that clears bytes, clear the
4622 // corresponding bytes in ByteMask.
4623 if (I->getOpcode() == Instruction::And &&
4624 isa<ConstantInt>(I->getOperand(1))) {
4625 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
4626 unsigned NumBytes = ByteValues.size();
4627 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
4628 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
4630 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
// If this byte is masked out by a later operation, we don't care what
// value it has.
4633 if ((ByteMask & (1 << i)) == 0)
4636 // If the AndMask is all zeros for this byte, clear the bit.
4637 APInt MaskB = AndMask & Byte;
4639 ByteMask &= ~(1U << i);
4643 // If the AndMask is not all ones for this byte, it's not a bytezap.
4647 // Otherwise, this byte is kept.
4650 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4655 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
4656 // the input value to the bswap. Some observations: 1) if more than one byte
4657 // is demanded from this input, then it could not be successfully assembled
4658 // into a byteswap. At least one of the two bytes would not be aligned with
4659 // their ultimate destination.
4660 if (!isPowerOf2_32(ByteMask)) return true;
4661 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
4663 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
4664 // is demanded, it needs to go into byte 0 of the result. This means that the
4665 // byte needs to be shifted until it lands in the right byte bucket. The
4666 // shift amount depends on the position: if the byte is coming from the high
4667 // part of the value (e.g. byte 3) then it must be shifted right. If from the
4668 // low part, it must be shifted left.
4669 unsigned DestByteNo = InputByteNo + OverallLeftShift;
4670 if (InputByteNo < ByteValues.size()/2) {
4671 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4674 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4678 // If the destination byte value is already defined, the values are or'd
4679 // together, which isn't a bswap (unless it's an or of the same bits).
4680 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
4682 ByteValues[DestByteNo] = V;
4686 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
4687 /// If so, insert the new bswap intrinsic and return it.
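/// A typical i32 idiom this recognizes looks like
///   (X << 24) | ((X & 0xFF00) << 8) | ((X >> 8) & 0xFF00) | (X >> 24)
/// which is rewritten into a call to llvm.bswap.i32.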
4688 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
4689 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
4690 if (!ITy || ITy->getBitWidth() % 16 ||
4691 // ByteMask only allows up to 32-byte values.
4692 ITy->getBitWidth() > 32*8)
4693 return 0; // Can only bswap pairs of bytes. Can't do vectors.
4695 /// ByteValues - For each byte of the result, we keep track of which value
4696 /// defines each byte.
4697 SmallVector<Value*, 8> ByteValues;
4698 ByteValues.resize(ITy->getBitWidth()/8);
4700 // Try to find all the pieces corresponding to the bswap.
4701 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
4702 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
4705 // Check to see if all of the bytes come from the same value.
4706 Value *V = ByteValues[0];
4707 if (V == 0) return 0; // Didn't find a byte? Must be zero.
4709 // Check to make sure that all of the bytes come from the same value.
4710 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
4711 if (ByteValues[i] != V)
4713 const Type *Tys[] = { ITy };
4714 Module *M = I.getParent()->getParent()->getParent();
4715 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
4716 return CallInst::Create(F, V);
/// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
/// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1). If
/// so, we can simplify this expression to "cond ? C : B" or "cond ? C : D".
4722 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
4724 LLVMContext *Context) {
4725 // If A is not a select of -1/0, this cannot match.
4727 if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond))))
4730 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
4731 if (match(D, m_SelectCst<0, -1>(m_Specific(Cond))))
4732 return SelectInst::Create(Cond, C, B);
4733 if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
4734 return SelectInst::Create(Cond, C, B);
4735 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
4736 if (match(B, m_SelectCst<0, -1>(m_Specific(Cond))))
4737 return SelectInst::Create(Cond, C, D);
4738 if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
4739 return SelectInst::Create(Cond, C, D);
4743 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
4744 Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
4745 ICmpInst *LHS, ICmpInst *RHS) {
4747 ConstantInt *LHSCst, *RHSCst;
4748 ICmpInst::Predicate LHSCC, RHSCC;
4750 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
4751 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
4752 m_ConstantInt(LHSCst))) ||
4753 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
4754 m_ConstantInt(RHSCst))))
4757 // From here on, we only handle:
4758 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
4759 if (Val != Val2) return 0;
4761 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
4762 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
4763 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
4764 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
4765 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
4768 // We can't fold (ugt x, C) | (sgt x, C2).
4769 if (!PredicatesFoldable(LHSCC, RHSCC))
4772 // Ensure that the larger constant is on the RHS.
4774 if (CmpInst::isSigned(LHSCC) ||
4775 (ICmpInst::isEquality(LHSCC) &&
4776 CmpInst::isSigned(RHSCC)))
4777 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
4779 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
4782 std::swap(LHS, RHS);
4783 std::swap(LHSCst, RHSCst);
4784 std::swap(LHSCC, RHSCC);
4787 // At this point, we know we have two icmp instructions
4788 // comparing a value against two constants and or'ing the result
4789 // together. Because of the above check, we know that we only have
4790 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
4791 // FoldICmpLogical check above), that the two constants are not equal.
4793 assert(LHSCst != RHSCst && "Compares not folded above?");
4796 default: llvm_unreachable("Unknown integer condition code!");
4797 case ICmpInst::ICMP_EQ:
4799 default: llvm_unreachable("Unknown integer condition code!");
4800 case ICmpInst::ICMP_EQ:
4801 if (LHSCst == SubOne(RHSCst)) {
4802 // (X == 13 | X == 14) -> X-13 <u 2
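// In general, (X == C1 | X == C2) with C2 == C1+1 becomes
// (X - C1) u< (C2 + 1 - C1), which is (X - 13) u< 2 for the example above.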
4803 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
4804 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
4805 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
4806 return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST);
4808 break; // (X == 13 | X == 15) -> no change
4809 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
4810 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
4812 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
4813 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
4814 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
4815 return ReplaceInstUsesWith(I, RHS);
4818 case ICmpInst::ICMP_NE:
4820 default: llvm_unreachable("Unknown integer condition code!");
4821 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
4822 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
4823 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
4824 return ReplaceInstUsesWith(I, LHS);
4825 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
4826 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
4827 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
4828 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4831 case ICmpInst::ICMP_ULT:
4833 default: llvm_unreachable("Unknown integer condition code!");
4834 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
4836 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
4837 // If RHSCst is [us]MAXINT, it is always false. Not handling
4838 // this can cause overflow.
4839 if (RHSCst->isMaxValue(false))
4840 return ReplaceInstUsesWith(I, LHS);
4841 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
4843 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
4845 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
4846 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
4847 return ReplaceInstUsesWith(I, RHS);
4848 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
4852 case ICmpInst::ICMP_SLT:
4854 default: llvm_unreachable("Unknown integer condition code!");
4855 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
4857 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
4858 // If RHSCst is [us]MAXINT, it is always false. Not handling
4859 // this can cause overflow.
4860 if (RHSCst->isMaxValue(true))
4861 return ReplaceInstUsesWith(I, LHS);
4862 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
4864 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
4866 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
4867 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
4868 return ReplaceInstUsesWith(I, RHS);
4869 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
4873 case ICmpInst::ICMP_UGT:
4875 default: llvm_unreachable("Unknown integer condition code!");
4876 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
4877 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
4878 return ReplaceInstUsesWith(I, LHS);
4879 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
4881 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
4882 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
4883 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4884 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
4888 case ICmpInst::ICMP_SGT:
4890 default: llvm_unreachable("Unknown integer condition code!");
4891 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
4892 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
4893 return ReplaceInstUsesWith(I, LHS);
4894 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
4896 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
4897 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
4898 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4899 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
4907 Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS,
4909 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
4910 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
4911 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
4912 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4913 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
4914 // If either of the constants are nans, then the whole thing returns
4916 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
4917 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4919 // Otherwise, no need to compare the two constants, compare the
4921 return new FCmpInst(FCmpInst::FCMP_UNO,
4922 LHS->getOperand(0), RHS->getOperand(0));
4925 // Handle vector zeros. This occurs because the canonical form of
4926 // "fcmp uno x,x" is "fcmp uno x, 0".
4927 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
4928 isa<ConstantAggregateZero>(RHS->getOperand(1)))
4929 return new FCmpInst(FCmpInst::FCMP_UNO,
4930 LHS->getOperand(0), RHS->getOperand(0));
4935 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
4936 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
4937 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
4939 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
4940 // Swap RHS operands to match LHS.
4941 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
4942 std::swap(Op1LHS, Op1RHS);
4944 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
4945 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
4947 return new FCmpInst((FCmpInst::Predicate)Op0CC,
4949 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
4950 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4951 if (Op0CC == FCmpInst::FCMP_FALSE)
4952 return ReplaceInstUsesWith(I, RHS);
4953 if (Op1CC == FCmpInst::FCMP_FALSE)
4954 return ReplaceInstUsesWith(I, LHS);
4957 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
4958 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
4959 if (Op0Ordered == Op1Ordered) {
4960 // If both are ordered or unordered, return a new fcmp with
4961 // or'ed predicates.
4962 Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred,
4963 Op0LHS, Op0RHS, Context);
4964 if (Instruction *I = dyn_cast<Instruction>(RV))
4966 // Otherwise, it's a constant boolean value...
4967 return ReplaceInstUsesWith(I, RV);
4973 /// FoldOrWithConstants - This helper function folds:
4975 ///     ((A | B) & C1) | (B & C2)
4977 /// into:
4979 ///     (A & C1) | B
4981 /// when the XOR of the two constants is "all ones" (-1).
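///
/// e.g. ((A | B) & 1) | (B & -2)  ->  (A & 1) | B, since 1 ^ -2 == -1.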
4982 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
4983 Value *A, Value *B, Value *C) {
4984 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
4988 ConstantInt *CI2 = 0;
4989 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
4991 APInt Xor = CI1->getValue() ^ CI2->getValue();
4992 if (!Xor.isAllOnesValue()) return 0;
4994 if (V1 == A || V1 == B) {
4995 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
4996 return BinaryOperator::CreateOr(NewOp, V1);
5002 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
5003 bool Changed = SimplifyCommutative(I);
5004 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5006 if (Value *V = SimplifyOrInst(Op0, Op1, TD))
5007 return ReplaceInstUsesWith(I, V);
5010 // See if we can simplify any instructions used by the instruction whose sole
5011 // purpose is to compute bits we don't care about.
5012 if (SimplifyDemandedInstructionBits(I))
5015 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
5016 ConstantInt *C1 = 0; Value *X = 0;
5017 // (X & C1) | C2 --> (X | C2) & (C1|C2)
5018 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
5020 Value *Or = Builder->CreateOr(X, RHS);
5022 return BinaryOperator::CreateAnd(Or,
5023 ConstantInt::get(*Context, RHS->getValue() | C1->getValue()));
5026 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
5027 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
5029 Value *Or = Builder->CreateOr(X, RHS);
5031 return BinaryOperator::CreateXor(Or,
5032 ConstantInt::get(*Context, C1->getValue() & ~RHS->getValue()));
5035 // Try to fold constant and into select arguments.
5036 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
5037 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
5039 if (isa<PHINode>(Op0))
5040 if (Instruction *NV = FoldOpIntoPhi(I))
5044 Value *A = 0, *B = 0;
5045 ConstantInt *C1 = 0, *C2 = 0;
5047 // (A | B) | C and A | (B | C) -> bswap if possible.
5048 // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
5049 if (match(Op0, m_Or(m_Value(), m_Value())) ||
5050 match(Op1, m_Or(m_Value(), m_Value())) ||
5051 (match(Op0, m_Shift(m_Value(), m_Value())) &&
5052 match(Op1, m_Shift(m_Value(), m_Value())))) {
5053 if (Instruction *BSwap = MatchBSwap(I))
5057 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
5058 if (Op0->hasOneUse() &&
5059 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
5060 MaskedValueIsZero(Op1, C1->getValue())) {
5061 Value *NOr = Builder->CreateOr(A, Op1);
5063 return BinaryOperator::CreateXor(NOr, C1);
5066 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
5067 if (Op1->hasOneUse() &&
5068 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
5069 MaskedValueIsZero(Op0, C1->getValue())) {
5070 Value *NOr = Builder->CreateOr(A, Op0);
5072 return BinaryOperator::CreateXor(NOr, C1);
5076 Value *C = 0, *D = 0;
5077 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
5078 match(Op1, m_And(m_Value(B), m_Value(D)))) {
5079 Value *V1 = 0, *V2 = 0, *V3 = 0;
5080 C1 = dyn_cast<ConstantInt>(C);
5081 C2 = dyn_cast<ConstantInt>(D);
5082 if (C1 && C2) { // (A & C1)|(B & C2)
5083 // If we have: ((V + N) & C1) | (V & C2)
5084 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
5085 // replace with V+N.
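// For example, with C2 = 3 (so C1 = -4) and N = 4:
//   ((V + 4) & -4) | (V & 3)  ==  V + 4
// because adding N cannot change the low bits selected by C2 when (N & C2) == 0.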
5086 if (C1->getValue() == ~C2->getValue()) {
5087 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
5088 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
5089 // Add commutes, try both ways.
5090 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
5091 return ReplaceInstUsesWith(I, A);
5092 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
5093 return ReplaceInstUsesWith(I, A);
5095 // Or commutes, try both ways.
5096 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
5097 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
5098 // Add commutes, try both ways.
5099 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
5100 return ReplaceInstUsesWith(I, B);
5101 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
5102 return ReplaceInstUsesWith(I, B);
5105 V1 = 0; V2 = 0; V3 = 0;
5108 // Check to see if we have any common things being and'ed. If so, find the
5109 // terms for V1 & (V2|V3).
5110 if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
5111 if (A == B) // (A & C)|(A & D) == A & (C|D)
5112 V1 = A, V2 = C, V3 = D;
5113 else if (A == D) // (A & C)|(B & A) == A & (B|C)
5114 V1 = A, V2 = B, V3 = C;
5115 else if (C == B) // (A & C)|(C & D) == C & (A|D)
5116 V1 = C, V2 = A, V3 = D;
5117 else if (C == D) // (A & C)|(B & C) == C & (A|B)
5118 V1 = C, V2 = A, V3 = B;
5121 Value *Or = Builder->CreateOr(V2, V3, "tmp");
5122 return BinaryOperator::CreateAnd(V1, Or);
5126 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
5127 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D, Context))
5129 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C, Context))
5131 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D, Context))
5133 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C, Context))
5136 // ((A&~B)|(~A&B)) -> A^B
5137 if ((match(C, m_Not(m_Specific(D))) &&
5138 match(B, m_Not(m_Specific(A)))))
5139 return BinaryOperator::CreateXor(A, D);
5140 // ((~B&A)|(~A&B)) -> A^B
5141 if ((match(A, m_Not(m_Specific(D))) &&
5142 match(B, m_Not(m_Specific(C)))))
5143 return BinaryOperator::CreateXor(C, D);
5144 // ((A&~B)|(B&~A)) -> A^B
5145 if ((match(C, m_Not(m_Specific(B))) &&
5146 match(D, m_Not(m_Specific(A)))))
5147 return BinaryOperator::CreateXor(A, B);
5148 // ((~B&A)|(B&~A)) -> A^B
5149 if ((match(A, m_Not(m_Specific(B))) &&
5150 match(D, m_Not(m_Specific(C)))))
5151 return BinaryOperator::CreateXor(C, B);
5154 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
5155 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
5156 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
5157 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
5158 SI0->getOperand(1) == SI1->getOperand(1) &&
5159 (SI0->hasOneUse() || SI1->hasOneUse())) {
5160 Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
5162 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
5163 SI1->getOperand(1));
5167 // ((A|B)&1)|(B&-2) -> (A&1) | B
5168 if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
5169 match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
5170 Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
5171 if (Ret) return Ret;
5173 // (B&-2)|((A|B)&1) -> (A&1) | B
5174 if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
5175 match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
5176 Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
5177 if (Ret) return Ret;
5180 // (~A | ~B) == (~(A & B)) - De Morgan's Law
5181 if (Value *Op0NotVal = dyn_castNotVal(Op0))
5182 if (Value *Op1NotVal = dyn_castNotVal(Op1))
5183 if (Op0->hasOneUse() && Op1->hasOneUse()) {
5184 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
5185 I.getName()+".demorgan");
5186 return BinaryOperator::CreateNot(And);
5189 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
5190 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
5191 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
5194 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
5195 if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
5199 // fold (or (cast A), (cast B)) -> (cast (or A, B))
5200 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5201 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
5202 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
5203 if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
5204 !isa<ICmpInst>(Op1C->getOperand(0))) {
5205 const Type *SrcTy = Op0C->getOperand(0)->getType();
5206 if (SrcTy == Op1C->getOperand(0)->getType() &&
5207 SrcTy->isIntOrIntVector() &&
5208 // Only do this if the casts both really cause code to be
5210 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
5212 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
5214 Value *NewOp = Builder->CreateOr(Op0C->getOperand(0),
5215 Op1C->getOperand(0), I.getName());
5216 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
5223 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
5224 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
5225 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
5226 if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
5230 return Changed ? &I : 0;
5235 // XorSelf - Implements: X ^ X --> 0
5238 XorSelf(Value *rhs) : RHS(rhs) {}
5239 bool shouldApply(Value *LHS) const { return LHS == RHS; }
5240 Instruction *apply(BinaryOperator &Xor) const {
5247 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
5248 bool Changed = SimplifyCommutative(I);
5249 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5251 if (isa<UndefValue>(Op1)) {
5252 if (isa<UndefValue>(Op0))
5253 // Handle undef ^ undef -> 0 special case. This is a common
5255 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
5256 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
5259 // xor X, X = 0, even if X is nested in a sequence of Xor's.
5260 if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) {
5261 assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
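// The self-assignment above only exists to silence the 'unused variable'
// warning when asserts are compiled out.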
5262 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
5265 // See if we can simplify any instructions used by the instruction whose sole
5266 // purpose is to compute bits we don't care about.
5267 if (SimplifyDemandedInstructionBits(I))
5269 if (isa<VectorType>(I.getType()))
5270 if (isa<ConstantAggregateZero>(Op1))
5271 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
5273 // Is this a ~ operation?
5274 if (Value *NotOp = dyn_castNotVal(&I)) {
5275 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
5276 if (Op0I->getOpcode() == Instruction::And ||
5277 Op0I->getOpcode() == Instruction::Or) {
5278 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
5279 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
5280 if (dyn_castNotVal(Op0I->getOperand(1)))
5281 Op0I->swapOperands();
5282 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
5284 Builder->CreateNot(Op0I->getOperand(1),
5285 Op0I->getOperand(1)->getName()+".not");
5286 if (Op0I->getOpcode() == Instruction::And)
5287 return BinaryOperator::CreateOr(Op0NotVal, NotY);
5288 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
5291 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
5292 // ~(X | Y) === (~X & ~Y) - De Morgan's Law
5293 if (isFreeToInvert(Op0I->getOperand(0)) &&
5294 isFreeToInvert(Op0I->getOperand(1))) {
5296 Builder->CreateNot(Op0I->getOperand(0), "notlhs");
5298 Builder->CreateNot(Op0I->getOperand(1), "notrhs");
5299 if (Op0I->getOpcode() == Instruction::And)
5300 return BinaryOperator::CreateOr(NotX, NotY);
5301 return BinaryOperator::CreateAnd(NotX, NotY);
5308 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
5309 if (RHS->isOne() && Op0->hasOneUse()) {
5310 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
5311 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
5312 return new ICmpInst(ICI->getInversePredicate(),
5313 ICI->getOperand(0), ICI->getOperand(1));
5315 if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
5316 return new FCmpInst(FCI->getInversePredicate(),
5317 FCI->getOperand(0), FCI->getOperand(1));
5320 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
5321 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5322 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
5323 if (CI->hasOneUse() && Op0C->hasOneUse()) {
5324 Instruction::CastOps Opcode = Op0C->getOpcode();
5325 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
5326 (RHS == ConstantExpr::getCast(Opcode,
5327 ConstantInt::getTrue(*Context),
5328 Op0C->getDestTy()))) {
5329 CI->setPredicate(CI->getInversePredicate());
5330 return CastInst::Create(Opcode, CI, Op0C->getType());
5336 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
5337 // ~(c-X) == X-c-1 == X+(-c-1)
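// e.g. ~(10 - X) == X - 11 == X + (-11)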
5338 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
5339 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
5340 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
5341 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
5342 ConstantInt::get(I.getType(), 1));
5343 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
5346 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
5347 if (Op0I->getOpcode() == Instruction::Add) {
5348 // ~(X+c) --> (-c-1)-X
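// e.g. ~(X + 5) == (-5 - 1) - X == -6 - X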
5349 if (RHS->isAllOnesValue()) {
5350 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
5351 return BinaryOperator::CreateSub(
5352 ConstantExpr::getSub(NegOp0CI,
5353 ConstantInt::get(I.getType(), 1)),
5354 Op0I->getOperand(0));
5355 } else if (RHS->getValue().isSignBit()) {
5356 // (X + C) ^ signbit -> (X + C + signbit)
5357 Constant *C = ConstantInt::get(*Context,
5358 RHS->getValue() + Op0CI->getValue());
5359 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
5362 } else if (Op0I->getOpcode() == Instruction::Or) {
5363 // (X|C1)^C2 -> X^(C1|C2) iff (X & C1) == 0
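// e.g. if (X & 3) == 0:  (X|3)^5  ->  X^7, and the common bit (1) is then
// cleared below, giving X^6, which equals (X|3)^5.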
5364 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
5365 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
5366 // Anything in both C1 and C2 produces a zero result bit, so remove it from the new constant.
5368 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
5369 NewRHS = ConstantExpr::getAnd(NewRHS,
5370 ConstantExpr::getNot(CommonBits));
5372 I.setOperand(0, Op0I->getOperand(0));
5373 I.setOperand(1, NewRHS);
5380 // Try to fold constant and into select arguments.
5381 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
5382 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
5384 if (isa<PHINode>(Op0))
5385 if (Instruction *NV = FoldOpIntoPhi(I))
5389 if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
5391 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
5393 if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
5395 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
5398 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
5401 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
5402 if (A == Op0) { // B^(B|A) == (A|B)^B
5403 Op1I->swapOperands();
5405 std::swap(Op0, Op1);
5406 } else if (B == Op0) { // B^(A|B) == (A|B)^B
5407 I.swapOperands(); // Simplified below.
5408 std::swap(Op0, Op1);
5410 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
5411 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
5412 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
5413 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
5414 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
5416 if (A == Op0) { // A^(A&B) -> A^(B&A)
5417 Op1I->swapOperands();
5420 if (B == Op0) { // A^(B&A) -> (B&A)^A
5421 I.swapOperands(); // Simplified below.
5422 std::swap(Op0, Op1);
5427 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
5430 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
5431 Op0I->hasOneUse()) {
5432 if (A == Op1) // (B|A)^B == (A|B)^B
5434 if (B == Op1) // (A|B)^B == A & ~B
5435 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
5436 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
5437 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
5438 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
5439 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
5440 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5442 if (A == Op1) // (A&B)^A -> (B&A)^A
5444 if (B == Op1 && // (B&A)^A == ~B & A
5445 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
5446 return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
5451 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
5452 if (Op0I && Op1I && Op0I->isShift() &&
5453 Op0I->getOpcode() == Op1I->getOpcode() &&
5454 Op0I->getOperand(1) == Op1I->getOperand(1) &&
5455 (Op0I->hasOneUse() || Op1I->hasOneUse())) {
5457 Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
5459 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
5460 Op1I->getOperand(1));
5464 Value *A, *B, *C, *D;
5465 // (A & B)^(A | B) -> A ^ B
5466 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5467 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
5468 if ((A == C && B == D) || (A == D && B == C))
5469 return BinaryOperator::CreateXor(A, B);
5471 // (A | B)^(A & B) -> A ^ B
5472 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
5473 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
5474 if ((A == C && B == D) || (A == D && B == C))
5475 return BinaryOperator::CreateXor(A, B);
5479 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
5480 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5481 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
5482 // (X & Y)^(X & Z) -> (Y^Z) & X
5483 Value *X = 0, *Y = 0, *Z = 0;
5485 X = A, Y = B, Z = D;
5487 X = A, Y = B, Z = C;
5489 X = B, Y = A, Z = D;
5491 X = B, Y = A, Z = C;
5494 Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
5495 return BinaryOperator::CreateAnd(NewOp, X);
5500 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
5501 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
5502 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
5505 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
5506 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5507 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
5508 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
5509 const Type *SrcTy = Op0C->getOperand(0)->getType();
5510 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
5511 // Only do this if the casts both really cause code to be generated.
5512 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
5514 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
5516 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
5517 Op1C->getOperand(0), I.getName());
5518 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
5523 return Changed ? &I : 0;
5526 static ConstantInt *ExtractElement(Constant *V, Constant *Idx,
5527 LLVMContext *Context) {
5528 return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
5531 static bool HasAddOverflow(ConstantInt *Result,
5532 ConstantInt *In1, ConstantInt *In2,
5535 if (In2->getValue().isNegative())
5536 return Result->getValue().sgt(In1->getValue());
5538 return Result->getValue().slt(In1->getValue());
5540 return Result->getValue().ult(In1->getValue());
5543 /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
5544 /// overflowed for this type.
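///
/// e.g. for i8: 100 + 100 wraps to -56, which HasAddOverflow flags because a
/// non-negative In2 produced a Result less than In1 in the signed case.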
5545 static bool AddWithOverflow(Constant *&Result, Constant *In1,
5546 Constant *In2, LLVMContext *Context,
5547 bool IsSigned = false) {
5548 Result = ConstantExpr::getAdd(In1, In2);
5550 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5551 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5552 Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
5553 if (HasAddOverflow(ExtractElement(Result, Idx, Context),
5554 ExtractElement(In1, Idx, Context),
5555 ExtractElement(In2, Idx, Context),
5562 return HasAddOverflow(cast<ConstantInt>(Result),
5563 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5567 static bool HasSubOverflow(ConstantInt *Result,
5568 ConstantInt *In1, ConstantInt *In2,
5571 if (In2->getValue().isNegative())
5572 return Result->getValue().slt(In1->getValue());
5574 return Result->getValue().sgt(In1->getValue());
5576 return Result->getValue().ugt(In1->getValue());
5579 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
5580 /// overflowed for this type.
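///
/// e.g. for i8 in the unsigned case: 5 - 10 wraps to 251, which HasSubOverflow
/// flags because the Result is greater than In1.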
5581 static bool SubWithOverflow(Constant *&Result, Constant *In1,
5582 Constant *In2, LLVMContext *Context,
5583 bool IsSigned = false) {
5584 Result = ConstantExpr::getSub(In1, In2);
5586 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5587 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5588 Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
5589 if (HasSubOverflow(ExtractElement(Result, Idx, Context),
5590 ExtractElement(In1, Idx, Context),
5591 ExtractElement(In2, Idx, Context),
5598 return HasSubOverflow(cast<ConstantInt>(Result),
5599 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5604 /// FoldGEPICmp - Fold comparisons between a GEP instruction and something
5605 /// else. At this point we know that the GEP is on the LHS of the comparison.
5606 Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
5607 ICmpInst::Predicate Cond,
5609 // Look through bitcasts.
5610 if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
5611 RHS = BCI->getOperand(0);
5613 Value *PtrBase = GEPLHS->getOperand(0);
5614 if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
5615 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
5616 // This transformation (ignoring the base and scales) is valid because we
5617 // know pointers can't overflow since the gep is inbounds. See if we can
5618 // output an optimized form.
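// For example, '(gep inbounds i32* %p, i64 %i) ugt %p' becomes a signed
// comparison of the computed byte offset (here 4*%i) against zero.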
5619 Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);
5621 // If not, synthesize the offset the hard way.
5623 Offset = EmitGEPOffset(GEPLHS, *this);
5624 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
5625 Constant::getNullValue(Offset->getType()));
5626 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
5627 // If the base pointers are different, but the indices are the same, just
5628 // compare the base pointer.
5629 if (PtrBase != GEPRHS->getOperand(0)) {
5630 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
5631 IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
5632 GEPRHS->getOperand(0)->getType();
5634 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5635 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5636 IndicesTheSame = false;
5640 // If all indices are the same, just compare the base pointers.
5642 return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
5643 GEPLHS->getOperand(0), GEPRHS->getOperand(0));
5645 // Otherwise, the base pointers are different and the indices are
5646 // different, bail out.
5650 // If one of the GEPs has all zero indices, recurse.
5651 bool AllZeros = true;
5652 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5653 if (!isa<Constant>(GEPLHS->getOperand(i)) ||
5654 !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
5659 return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
5660 ICmpInst::getSwappedPredicate(Cond), I);
5662 // If the other GEP has all zero indices, recurse.
5664 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5665 if (!isa<Constant>(GEPRHS->getOperand(i)) ||
5666 !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
5671 return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
5673 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
5674 // If the GEPs only differ by one index, compare it.
5675 unsigned NumDifferences = 0; // Keep track of # differences.
5676 unsigned DiffOperand = 0; // The operand that differs.
5677 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5678 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5679 if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
5680 GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
5681 // Irreconcilable differences.
5685 if (NumDifferences++) break;
5690 if (NumDifferences == 0) // SAME GEP?
5691 return ReplaceInstUsesWith(I, // No comparison is needed here.
5692 ConstantInt::get(Type::getInt1Ty(*Context),
5693 ICmpInst::isTrueWhenEqual(Cond)));
5695 else if (NumDifferences == 1) {
5696 Value *LHSV = GEPLHS->getOperand(DiffOperand);
5697 Value *RHSV = GEPRHS->getOperand(DiffOperand);
5698 // Make sure we do a signed comparison here.
5699 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
5703 // Only lower this if the icmp is the only user of the GEP or if we expect
5704 // the result to fold to a constant!
5706 (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
5707 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
5708 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
5709 Value *L = EmitGEPOffset(GEPLHS, *this);
5710 Value *R = EmitGEPOffset(GEPRHS, *this);
5711 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
5717 /// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
5719 Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
5722 if (!isa<ConstantFP>(RHSC)) return 0;
5723 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5725 // Get the width of the mantissa. We don't want to hack on conversions that
5726 // might lose information from the integer, e.g. "i64 -> float"
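// For example, 'float' has a 24-bit significand, so sitofp from i16 is exact,
// but sitofp from i32 or i64 may round and is rejected by the check below.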
5727 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5728 if (MantissaWidth == -1) return 0; // Unknown.
5730 // Check to see that the input is converted from an integer type that is small
5731 // enough that all bits are preserved. TODO: check here for "known" sign bits.
5732 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
5733 unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();
5735 // If this is a uitofp instruction, we need an extra bit to hold the sign.
5736 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5740 // If the conversion would lose info, don't hack on this.
5741 if ((int)InputSize > MantissaWidth)
5744 // Otherwise, we can potentially simplify the comparison. We know that it
5745 // will always come through as an integer value and we know the constant is
5746 // not a NAN (it would have been previously simplified).
5747 assert(!RHS.isNaN() && "NaN comparison not already folded!");
5749 ICmpInst::Predicate Pred;
5750 switch (I.getPredicate()) {
5751 default: llvm_unreachable("Unexpected predicate!");
5752 case FCmpInst::FCMP_UEQ:
5753 case FCmpInst::FCMP_OEQ:
5754 Pred = ICmpInst::ICMP_EQ;
5756 case FCmpInst::FCMP_UGT:
5757 case FCmpInst::FCMP_OGT:
5758 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5760 case FCmpInst::FCMP_UGE:
5761 case FCmpInst::FCMP_OGE:
5762 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5764 case FCmpInst::FCMP_ULT:
5765 case FCmpInst::FCMP_OLT:
5766 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5768 case FCmpInst::FCMP_ULE:
5769 case FCmpInst::FCMP_OLE:
5770 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5772 case FCmpInst::FCMP_UNE:
5773 case FCmpInst::FCMP_ONE:
5774 Pred = ICmpInst::ICMP_NE;
5776 case FCmpInst::FCMP_ORD:
5777 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5778 case FCmpInst::FCMP_UNO:
5779 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5782 const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5784 // Now we know that the APFloat is a normal number, zero or inf.
5786 // See if the FP constant is too large for the integer. For example,
5787 // comparing an i8 to 300.0.
5788 unsigned IntWidth = IntTy->getScalarSizeInBits();
5791 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
5792 // and large values.
5793 APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
5794 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5795 APFloat::rmNearestTiesToEven);
5796 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
5797 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
5798 Pred == ICmpInst::ICMP_SLE)
5799 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5800 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5803 // If the RHS value is > UnsignedMax, fold the comparison. This handles
5804 // +INF and large values.
5805 APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
5806 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5807 APFloat::rmNearestTiesToEven);
5808 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
5809 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
5810 Pred == ICmpInst::ICMP_ULE)
5811 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5812 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5817 // See if the RHS value is < SignedMin.
5818 APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
5819 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5820 APFloat::rmNearestTiesToEven);
5821 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
5822 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5823 Pred == ICmpInst::ICMP_SGE)
5824 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5825 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5829 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5830 // [0, UMAX], but it may still be fractional. See if it is fractional by
5831 // casting the FP value to the integer value and back, checking for equality.
5832 // Don't do this for zero, because -0.0 is not fractional.
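// For example, comparing '(float)x <= 4.4': 4.4 converts to the integer 4,
// which does not convert back to 4.4, so the predicate is adjusted below and
// the compare becomes 'x <= 4'.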
5833 Constant *RHSInt = LHSUnsigned
5834 ? ConstantExpr::getFPToUI(RHSC, IntTy)
5835 : ConstantExpr::getFPToSI(RHSC, IntTy);
5836 if (!RHS.isZero()) {
5837 bool Equal = LHSUnsigned
5838 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
5839 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
5841 // If we had a comparison against a fractional value, we have to adjust
5842 // the compare predicate and sometimes the value. RHSC is rounded towards
5843 // zero at this point.
5845 default: llvm_unreachable("Unexpected integer comparison!");
5846 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
5847 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5848 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
5849 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5850 case ICmpInst::ICMP_ULE:
5851 // (float)int <= 4.4 --> int <= 4
5852 // (float)int <= -4.4 --> false
5853 if (RHS.isNegative())
5854 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5856 case ICmpInst::ICMP_SLE:
5857 // (float)int <= 4.4 --> int <= 4
5858 // (float)int <= -4.4 --> int < -4
5859 if (RHS.isNegative())
5860 Pred = ICmpInst::ICMP_SLT;
5862 case ICmpInst::ICMP_ULT:
5863 // (float)int < -4.4 --> false
5864 // (float)int < 4.4 --> int <= 4
5865 if (RHS.isNegative())
5866 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5867 Pred = ICmpInst::ICMP_ULE;
5869 case ICmpInst::ICMP_SLT:
5870 // (float)int < -4.4 --> int < -4
5871 // (float)int < 4.4 --> int <= 4
5872 if (!RHS.isNegative())
5873 Pred = ICmpInst::ICMP_SLE;
5875 case ICmpInst::ICMP_UGT:
5876 // (float)int > 4.4 --> int > 4
5877 // (float)int > -4.4 --> true
5878 if (RHS.isNegative())
5879 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5881 case ICmpInst::ICMP_SGT:
5882 // (float)int > 4.4 --> int > 4
5883 // (float)int > -4.4 --> int >= -4
5884 if (RHS.isNegative())
5885 Pred = ICmpInst::ICMP_SGE;
5887 case ICmpInst::ICMP_UGE:
5888 // (float)int >= -4.4 --> true
5889 // (float)int >= 4.4 --> int > 4
5890 if (!RHS.isNegative())
5891 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5892 Pred = ICmpInst::ICMP_UGT;
5894 case ICmpInst::ICMP_SGE:
5895 // (float)int >= -4.4 --> int >= -4
5896 // (float)int >= 4.4 --> int > 4
5897 if (!RHS.isNegative())
5898 Pred = ICmpInst::ICMP_SGT;
5904 // Lower this FP comparison into an appropriate integer version of the
5906 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
5909 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
5910 bool Changed = false;
5912 /// Orders the operands of the compare so that they are listed from most
5913 /// complex to least complex. This puts constants before unary operators,
5914 /// before binary operators.
5915 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
5920 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5922 if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
5923 return ReplaceInstUsesWith(I, V);
5925 // Simplify 'fcmp pred X, X'
5927 switch (I.getPredicate()) {
5928 default: llvm_unreachable("Unknown predicate!");
5929 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
5930 case FCmpInst::FCMP_ULT: // True if unordered or less than
5931 case FCmpInst::FCMP_UGT: // True if unordered or greater than
5932 case FCmpInst::FCMP_UNE: // True if unordered or not equal
5933 // Canonicalize these to be 'fcmp uno %X, 0.0'.
5934 I.setPredicate(FCmpInst::FCMP_UNO);
5935 I.setOperand(1, Constant::getNullValue(Op0->getType()));
5938 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
5939 case FCmpInst::FCMP_OEQ: // True if ordered and equal
5940 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
5941 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
5942 // Canonicalize these to be 'fcmp ord %X, 0.0'.
5943 I.setPredicate(FCmpInst::FCMP_ORD);
5944 I.setOperand(1, Constant::getNullValue(Op0->getType()));
5949 // Handle fcmp with constant RHS
5950 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
5951 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
5952 switch (LHSI->getOpcode()) {
5953 case Instruction::PHI:
5954 // Only fold fcmp into the PHI if the phi and fcmp are in the same
5955 // block. If in the same block, we're encouraging jump threading. If
5956 // not, we are just pessimizing the code by making an i1 phi.
5957 if (LHSI->getParent() == I.getParent())
5958 if (Instruction *NV = FoldOpIntoPhi(I, true))
5961 case Instruction::SIToFP:
5962 case Instruction::UIToFP:
5963 if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
5966 case Instruction::Select:
5967 // If either operand of the select is a constant, we can fold the
5968 // comparison into the select arms, which will cause one to be
5969 // constant folded and the select turned into a bitwise or.
5970 Value *Op1 = 0, *Op2 = 0;
5971 if (LHSI->hasOneUse()) {
5972 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
5973 // Fold the known value into the constant operand.
5974 Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
5975 // Insert a new FCmp of the other select operand.
5976 Op2 = Builder->CreateFCmp(I.getPredicate(),
5977 LHSI->getOperand(2), RHSC, I.getName());
5978 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
5979 // Fold the known value into the constant operand.
5980 Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
5981 // Insert a new FCmp of the other select operand.
5982 Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
5988 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
5993 return Changed ? &I : 0;
5996 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
5997 bool Changed = false;
5999 /// Orders the operands of the compare so that they are listed from most
6000 /// complex to least complex. This puts constants before unary operators,
6001 /// before binary operators.
6002 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6007 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6009 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
6010 return ReplaceInstUsesWith(I, V);
6012 const Type *Ty = Op0->getType();
6014 // icmp's with boolean values can always be turned into bitwise operations
6015 if (Ty == Type::getInt1Ty(*Context)) {
6016 switch (I.getPredicate()) {
6017 default: llvm_unreachable("Invalid icmp instruction!");
6018 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
6019 Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
6020 return BinaryOperator::CreateNot(Xor);
6022 case ICmpInst::ICMP_NE: // icmp ne i1 A, B -> A^B
6023 return BinaryOperator::CreateXor(Op0, Op1);
6025 case ICmpInst::ICMP_UGT:
6026 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
6028 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
6029 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
6030 return BinaryOperator::CreateAnd(Not, Op1);
6032 case ICmpInst::ICMP_SGT:
6033 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
6035 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
6036 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
6037 return BinaryOperator::CreateAnd(Not, Op0);
6039 case ICmpInst::ICMP_UGE:
6040 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
6042 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
6043 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
6044 return BinaryOperator::CreateOr(Not, Op1);
6046 case ICmpInst::ICMP_SGE:
6047 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
6049 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
6050 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
6051 return BinaryOperator::CreateOr(Not, Op0);
6056 unsigned BitWidth = 0;
6058 BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
6059 else if (Ty->isIntOrIntVector())
6060 BitWidth = Ty->getScalarSizeInBits();
6062 bool isSignBit = false;
6064 // See if we are doing a comparison with a constant.
6065 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6066 Value *A = 0, *B = 0;
6068 // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
6069 if (I.isEquality() && CI->isNullValue() &&
6070 match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
6071 // (icmp cond A B) if cond is equality
6072 return new ICmpInst(I.getPredicate(), A, B);
6075 // If we have an icmp le or icmp ge instruction, turn it into the
6076 // appropriate icmp lt or icmp gt instruction. This allows us to rely on
6077 // them being folded in the code below. The SimplifyICmpInst code has
6078 // already handled the edge cases for us, so we just assert on them.
6079 switch (I.getPredicate()) {
6081 case ICmpInst::ICMP_ULE:
6082 assert(!CI->isMaxValue(false)); // A <=u MAX -> TRUE
6083 return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
6085 case ICmpInst::ICMP_SLE:
6086 assert(!CI->isMaxValue(true)); // A <=s MAX -> TRUE
6087 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
6089 case ICmpInst::ICMP_UGE:
6090 assert(!CI->isMinValue(false)); // A >=u MIN -> TRUE
6091 return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
6093 case ICmpInst::ICMP_SGE:
6094 assert(!CI->isMinValue(true)); // A >=s MIN -> TRUE
6095 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
6099 // If this comparison is a normal comparison, it demands all
6100 // bits, if it is a sign bit comparison, it only demands the sign bit.
6102 isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
6105 // See if we can fold the comparison based on range information we can get
6106 // by checking whether bits are known to be zero or one in the input.
6107 if (BitWidth != 0) {
6108 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
6109 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
6111 if (SimplifyDemandedBits(I.getOperandUse(0),
6112 isSignBit ? APInt::getSignBit(BitWidth)
6113 : APInt::getAllOnesValue(BitWidth),
6114 Op0KnownZero, Op0KnownOne, 0))
6116 if (SimplifyDemandedBits(I.getOperandUse(1),
6117 APInt::getAllOnesValue(BitWidth),
6118 Op1KnownZero, Op1KnownOne, 0))
6121 // Given the known and unknown bits, compute a range that the LHS could be
6122 // in. Compute the Min, Max and RHS values based on the known bits. For the
6123 // EQ and NE we use unsigned values.
6124 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6125 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6127 ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6129 ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6132 ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6134 ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6138 // If Min and Max are known to be the same, then SimplifyDemandedBits
6139 // figured out that the LHS is a constant. Just constant fold this now so
6140 // that code below can assume that Min != Max.
6141 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
6142 return new ICmpInst(I.getPredicate(),
6143 ConstantInt::get(*Context, Op0Min), Op1);
6144 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
6145 return new ICmpInst(I.getPredicate(), Op0,
6146 ConstantInt::get(*Context, Op1Min));
6148 // Based on the range information we know about the LHS, see if we can
6149 // simplify this comparison. For example, (x&4) < 8 is always true.
6150 switch (I.getPredicate()) {
6151 default: llvm_unreachable("Unknown icmp opcode!");
6152 case ICmpInst::ICMP_EQ:
6153 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6154 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6156 case ICmpInst::ICMP_NE:
6157 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6158 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6160 case ICmpInst::ICMP_ULT:
6161 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
6162 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6163 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
6164 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6165 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6166 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6167 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6168 if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
6169 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6172 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
6173 if (CI->isMinValue(true))
6174 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
6175 Constant::getAllOnesValue(Op0->getType()));
6178 case ICmpInst::ICMP_UGT:
6179 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
6180 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6181 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
6182 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6184 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6185 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6186 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6187 if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(a)-1 == C
6188 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6191 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
6192 if (CI->isMaxValue(true))
6193 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
6194 Constant::getNullValue(Op0->getType()));
6197 case ICmpInst::ICMP_SLT:
6198 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
6199 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6200 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
6201 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6202 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
6203 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6204 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6205 if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
6206 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6210 case ICmpInst::ICMP_SGT:
6211 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
6212 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6213 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
6214 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6216 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
6217 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6218 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6219 if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
6220 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6224 case ICmpInst::ICMP_SGE:
6225 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
6226 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
6227 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6228 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
6229 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6231 case ICmpInst::ICMP_SLE:
6232 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
6233 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
6234 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6235 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
6236 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6238 case ICmpInst::ICMP_UGE:
6239 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
6240 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
6241 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6242 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
6243 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6245 case ICmpInst::ICMP_ULE:
6246 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
6247 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
6248 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6249 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
6250 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6254 // Turn a signed comparison into an unsigned one if both operands
6255 // are known to have the same sign.
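// e.g. if the sign bit of both operands is known to be zero, 'icmp slt' can be
// rewritten as 'icmp ult' without changing the result.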
6257 ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
6258 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
6259 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
6262 // Test if the ICmpInst instruction is used exclusively by a select as
6263 // part of a minimum or maximum operation. If so, refrain from doing
6264 // any other folding. This helps out other analyses which understand
6265 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6266 // and CodeGen. And in this case, at least one of the comparison
6267 // operands has at least one user besides the compare (the select),
6268 // which would often largely negate the benefit of folding anyway.
6270 if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
6271 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
6272 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
6275 // See if we are doing a comparison between a constant and an instruction that
6276 // can be folded into the comparison.
6277 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6278 // Since the RHS is a ConstantInt (CI), if the left hand side is an
6279 // instruction, see if that instruction also has constants so that the
6280 // instruction can be folded into the icmp
6281 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6282 if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
6286 // Handle icmp with constant (but not simple integer constant) RHS
6287 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
6288 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6289 switch (LHSI->getOpcode()) {
6290 case Instruction::GetElementPtr:
6291 if (RHSC->isNullValue()) {
6292 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
6293 bool isAllZeros = true;
6294 for (unsigned i = 1, e = LHSI->getNumOperands(); i != e; ++i)
6295 if (!isa<Constant>(LHSI->getOperand(i)) ||
6296 !cast<Constant>(LHSI->getOperand(i))->isNullValue()) {
6301 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
6302 Constant::getNullValue(LHSI->getOperand(0)->getType()));
6306 case Instruction::PHI:
6307 // Only fold icmp into the PHI if the phi and icmp are in the same
6308 // block. If in the same block, we're encouraging jump threading. If
6309 // not, we are just pessimizing the code by making an i1 phi.
6310 if (LHSI->getParent() == I.getParent())
6311 if (Instruction *NV = FoldOpIntoPhi(I, true))
6314 case Instruction::Select: {
6315 // If either operand of the select is a constant, we can fold the
6316 // comparison into the select arms, which will cause one to be
6317 // constant folded and the select turned into a bitwise or.
6318 Value *Op1 = 0, *Op2 = 0;
6319 if (LHSI->hasOneUse()) {
6320 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
6321 // Fold the known value into the constant operand.
6322 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
6323 // Insert a new ICmp of the other select operand.
6324 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
6326 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
6327 // Fold the known value into the constant operand.
6328 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
6329 // Insert a new ICmp of the other select operand.
6330 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
6336 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
6339 case Instruction::Call:
6340 // If we have (malloc != null), and if the malloc has a single use, we
6341 // can assume it is successful and remove the malloc.
6342 if (isMalloc(LHSI) && LHSI->hasOneUse() &&
6343 isa<ConstantPointerNull>(RHSC)) {
6344 // Need to explicitly erase malloc call here, instead of adding it to
6345 // Worklist, because it won't get DCE'd from the Worklist since
6346 // isInstructionTriviallyDead() returns false for function calls.
6347 // It is OK to replace LHSI/MallocCall with Undef because the
6348 // instruction that uses it will be erased via Worklist.
6349 if (extractMallocCall(LHSI)) {
6350 LHSI->replaceAllUsesWith(UndefValue::get(LHSI->getType()));
6351 EraseInstFromFunction(*LHSI);
6352 return ReplaceInstUsesWith(I,
6353 ConstantInt::get(Type::getInt1Ty(*Context),
6354 !I.isTrueWhenEqual()));
6356 if (CallInst* MallocCall = extractMallocCallFromBitCast(LHSI))
6357 if (MallocCall->hasOneUse()) {
6358 MallocCall->replaceAllUsesWith(
6359 UndefValue::get(MallocCall->getType()));
6360 EraseInstFromFunction(*MallocCall);
6361 Worklist.Add(LHSI); // The malloc's bitcast use.
6362 return ReplaceInstUsesWith(I,
6363 ConstantInt::get(Type::getInt1Ty(*Context),
6364 !I.isTrueWhenEqual()));
6371 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
6372 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
6373 if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
6375 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
6376 if (Instruction *NI = FoldGEPICmp(GEP, Op0,
6377 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
6380 // Test to see if the operands of the icmp are casted versions of other
6381 // values. If the ptr->ptr cast can be stripped off both arguments, we do so now.
6383 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
6384 if (isa<PointerType>(Op0->getType()) &&
6385 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
6386 // We keep moving the cast from the left operand over to the right
6387 // operand, where it can often be eliminated completely.
6388 Op0 = CI->getOperand(0);
6390 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
6391 // so eliminate it as well.
6392 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
6393 Op1 = CI2->getOperand(0);
6395 // If Op1 is a constant, we can fold the cast into the constant.
6396 if (Op0->getType() != Op1->getType()) {
6397 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
6398 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
6400 // Otherwise, cast the RHS right before the icmp
6401 Op1 = Builder->CreateBitCast(Op1, Op0->getType());
6404 return new ICmpInst(I.getPredicate(), Op0, Op1);
6408 if (isa<CastInst>(Op0)) {
6409 // Handle the special case of: icmp (cast bool to X), <cst>
6410 // This comes up when you have code like
6413 // For generality, we handle any zero-extension of any operand comparison
6414 // with a constant or another cast from the same type.
6415 if (isa<ConstantInt>(Op1) || isa<CastInst>(Op1))
6416 if (Instruction *R = visitICmpInstWithCastAndCast(I))
6420 // See if it's the same type of instruction on the left and right.
6421 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
6422 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
6423 if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
6424 Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
6425 switch (Op0I->getOpcode()) {
6427 case Instruction::Add:
6428 case Instruction::Sub:
6429 case Instruction::Xor:
6430 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
6431 return new ICmpInst(I.getPredicate(), Op0I->getOperand(0),
6432 Op1I->getOperand(0));
6433 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
6434 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6435 if (CI->getValue().isSignBit()) {
6436 ICmpInst::Predicate Pred = I.isSigned()
6437 ? I.getUnsignedPredicate()
6438 : I.getSignedPredicate();
6439 return new ICmpInst(Pred, Op0I->getOperand(0),
6440 Op1I->getOperand(0));
6443 if (CI->getValue().isMaxSignedValue()) {
6444 ICmpInst::Predicate Pred = I.isSigned()
6445 ? I.getUnsignedPredicate()
6446 : I.getSignedPredicate();
6447 Pred = I.getSwappedPredicate(Pred);
6448 return new ICmpInst(Pred, Op0I->getOperand(0),
6449 Op1I->getOperand(0));
6453 case Instruction::Mul:
6454 if (!I.isEquality())
6457 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6458 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
6459 // Mask = -1 >> count-trailing-zeros(Cst).
6460 if (!CI->isZero() && !CI->isOne()) {
6461 const APInt &AP = CI->getValue();
6462 ConstantInt *Mask = ConstantInt::get(*Context,
6463 APInt::getLowBitsSet(AP.getBitWidth(),
6465 AP.countTrailingZeros()));
6466 Value *And1 = Builder->CreateAnd(Op0I->getOperand(0), Mask);
6467 Value *And2 = Builder->CreateAnd(Op1I->getOperand(0), Mask);
6468 return new ICmpInst(I.getPredicate(), And1, And2);
6477 // ~x < ~y --> y < x
6479 if (match(Op0, m_Not(m_Value(A))) &&
6480 match(Op1, m_Not(m_Value(B))))
6481 return new ICmpInst(I.getPredicate(), B, A);
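  // For example (illustrative): icmp slt (xor X, -1), (xor Y, -1) becomes
  // icmp slt Y, X, since ~X <s ~Y exactly when Y <s X.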
6484 if (I.isEquality()) {
6485 Value *A, *B, *C, *D;
6487 // -x == -y --> x == y
6488 if (match(Op0, m_Neg(m_Value(A))) &&
6489 match(Op1, m_Neg(m_Value(B))))
6490 return new ICmpInst(I.getPredicate(), A, B);
6492 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
6493 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
6494 Value *OtherVal = A == Op1 ? B : A;
6495 return new ICmpInst(I.getPredicate(), OtherVal,
6496 Constant::getNullValue(A->getType()));
6499 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
6500 // A^c1 == C^c2 --> A == C^(c1^c2)
6501 ConstantInt *C1, *C2;
6502 if (match(B, m_ConstantInt(C1)) &&
6503 match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) {
6505 ConstantInt::get(*Context, C1->getValue() ^ C2->getValue());
6506 Value *Xor = Builder->CreateXor(C, NC, "tmp");
6507 return new ICmpInst(I.getPredicate(), A, Xor);
6510 // A^B == A^D -> B == D
6511 if (A == C) return new ICmpInst(I.getPredicate(), B, D);
6512 if (A == D) return new ICmpInst(I.getPredicate(), B, C);
6513 if (B == C) return new ICmpInst(I.getPredicate(), A, D);
6514 if (B == D) return new ICmpInst(I.getPredicate(), A, C);
6518 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
6519 (A == Op0 || B == Op0)) {
6520 // A == (A^B) -> B == 0
6521 Value *OtherVal = A == Op0 ? B : A;
6522 return new ICmpInst(I.getPredicate(), OtherVal,
6523 Constant::getNullValue(A->getType()));
6526 // (A-B) == A -> B == 0
6527 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B))))
6528 return new ICmpInst(I.getPredicate(), B,
6529 Constant::getNullValue(B->getType()));
6531 // A == (A-B) -> B == 0
6532 if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B))))
6533 return new ICmpInst(I.getPredicate(), B,
6534 Constant::getNullValue(B->getType()));
6536 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6537 if (Op0->hasOneUse() && Op1->hasOneUse() &&
6538 match(Op0, m_And(m_Value(A), m_Value(B))) &&
6539 match(Op1, m_And(m_Value(C), m_Value(D)))) {
6540 Value *X = 0, *Y = 0, *Z = 0;
6543 X = B; Y = D; Z = A;
6544 } else if (A == D) {
6545 X = B; Y = C; Z = A;
6546 } else if (B == C) {
6547 X = A; Y = D; Z = B;
6548 } else if (B == D) {
6549 X = A; Y = C; Z = B;
6552 if (X) { // Build (X^Y) & Z
6553 Op1 = Builder->CreateXor(X, Y, "tmp");
6554 Op1 = Builder->CreateAnd(Op1, Z, "tmp");
6555 I.setOperand(0, Op1);
6556 I.setOperand(1, Constant::getNullValue(Op1->getType()));
6561 return Changed ? &I : 0;
6565 /// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
6566 /// and CmpRHS are both known to be integer constants.
6567 Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
6568 ConstantInt *DivRHS) {
6569 ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
6570 const APInt &CmpRHSV = CmpRHS->getValue();
6572 // FIXME: If the operand types don't match the type of the divide
6573 // then don't attempt this transform. The code below doesn't have the
6574 // logic to deal with a signed divide and an unsigned compare (and
6575 // vice versa). This is because (x /s C1) <s C2 produces different
6576 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
6577 // (x /u C1) <u C2. Simply casting the operands and result won't
6578 // work. :( The if statement below tests that condition and bails if so.
6580 bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
6581 if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
6583 if (DivRHS->isZero())
6584 return 0; // The ProdOV computation fails on divide by zero.
6585 if (DivIsSigned && DivRHS->isAllOnesValue())
6586 return 0; // The overflow computation also screws up here
6587 if (DivRHS->isOne())
6588 return 0; // Not worth bothering, and eliminates some funny cases
6591 // Compute Prod = CI * DivRHS. We are essentially solving an equation
6592 // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
6593 // C2 (CI). By solving for X we can turn this into a range check
6594 // instead of computing a divide.
6595 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
6597 // Determine if the product overflows by seeing if the product is
6598 // not equal to the divide. Make sure we do the same kind of divide
6599 // as in the LHS instruction that we're folding.
6600 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
6601 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
6603 // Get the ICmp opcode
6604 ICmpInst::Predicate Pred = ICI.getPredicate();
6606 // Figure out the interval that is being checked. For example, a comparison
6607 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
6608 // Compute this interval based on the constants involved and the signedness of
6609 // the compare/divide. This computes a half-open interval, keeping track of
6610 // whether either value in the interval overflows. After analysis each
6611 // overflow variable is set to 0 if its corresponding bound variable is valid,
6612 // -1 if it overflowed off the bottom end, or +1 if it overflowed off the top end.
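  // Worked example (illustrative, hypothetical constants): for
  // "icmp eq (udiv i8 X, 10), 30" the product 30*10 does not fit in i8, so
  // both overflow flags are set and the EQ case below folds the compare to false.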
6613 int LoOverflow = 0, HiOverflow = 0;
6614 Constant *LoBound = 0, *HiBound = 0;
6616 if (!DivIsSigned) { // udiv
6617 // e.g. X/5 op 3 --> [15, 20)
6619 HiOverflow = LoOverflow = ProdOV;
6621 HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, Context, false);
6622 } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
6623 if (CmpRHSV == 0) { // (X / pos) op 0
6624 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
6625 LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS)));
6627 } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
6628 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
6629 HiOverflow = LoOverflow = ProdOV;
6631 HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, Context, true);
6632 } else { // (X / pos) op neg
6633 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
6634 HiBound = AddOne(Prod);
6635 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
6637 ConstantInt* DivNeg =
6638 cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
6639 LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, Context,
6643 } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
6644 if (CmpRHSV == 0) { // (X / neg) op 0
6645 // e.g. X/-5 op 0 --> [-4, 5)
6646 LoBound = AddOne(DivRHS);
6647 HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
6648 if (HiBound == DivRHS) { // -INTMIN = INTMIN
6649 HiOverflow = 1; // [INTMIN+1, overflow)
6650 HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
6652 } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
6653 // e.g. X/-5 op 3 --> [-19, -14)
6654 HiBound = AddOne(Prod);
6655 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
6657 LoOverflow = AddWithOverflow(LoBound, HiBound,
6658 DivRHS, Context, true) ? -1 : 0;
6659 } else { // (X / neg) op neg
6660 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
6661 LoOverflow = HiOverflow = ProdOV;
6663 HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, Context, true);
6666 // Dividing by a negative swaps the condition. LT <-> GT
6667 Pred = ICmpInst::getSwappedPredicate(Pred);
6670 Value *X = DivI->getOperand(0);
6672 default: llvm_unreachable("Unhandled icmp opcode!");
6673 case ICmpInst::ICMP_EQ:
6674 if (LoOverflow && HiOverflow)
6675 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6676 else if (HiOverflow)
6677 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
6678 ICmpInst::ICMP_UGE, X, LoBound);
6679 else if (LoOverflow)
6680 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
6681 ICmpInst::ICMP_ULT, X, HiBound);
6683 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI);
6684 case ICmpInst::ICMP_NE:
6685 if (LoOverflow && HiOverflow)
6686 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6687 else if (HiOverflow)
6688 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
6689 ICmpInst::ICMP_ULT, X, LoBound);
6690 else if (LoOverflow)
6691 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
6692 ICmpInst::ICMP_UGE, X, HiBound);
6694 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI);
6695 case ICmpInst::ICMP_ULT:
6696 case ICmpInst::ICMP_SLT:
6697 if (LoOverflow == +1) // Low bound is greater than input range.
6698 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6699 if (LoOverflow == -1) // Low bound is less than input range.
6700 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6701 return new ICmpInst(Pred, X, LoBound);
6702 case ICmpInst::ICMP_UGT:
6703 case ICmpInst::ICMP_SGT:
6704 if (HiOverflow == +1) // High bound greater than input range.
6705 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6706 else if (HiOverflow == -1) // High bound less than input range.
6707 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6708 if (Pred == ICmpInst::ICMP_UGT)
6709 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
6711 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
6716 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
6718 Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
6721 const APInt &RHSV = RHS->getValue();
6723 switch (LHSI->getOpcode()) {
6724 case Instruction::Trunc:
6725 if (ICI.isEquality() && LHSI->hasOneUse()) {
6726 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
6727 // of the high bits truncated out of x are known.
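      // Illustrative: if x is i32 and its top 24 bits are all known zero,
      //   icmp eq (trunc i32 x to i8), 42  -->  icmp eq i32 x, 42
      // (any known-one high bits would be OR'd into the new constant).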
6728 unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
6729 SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
6730 APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
6731 APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
6732 ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
6734 // If all the high bits are known, we can do this xform.
6735 if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
6736 // Pull in the high bits from known-ones set.
6737 APInt NewRHS(RHS->getValue());
6738 NewRHS.zext(SrcBits);
6740 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
6741 ConstantInt::get(*Context, NewRHS));
6746 case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
6747 if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
6748 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
6750 if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
6751 (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
6752 Value *CompareVal = LHSI->getOperand(0);
6754 // If the sign bit of the XorCST is not set, there is no change to
6755 // the operation, just stop using the Xor.
6756 if (!XorCST->getValue().isNegative()) {
6757 ICI.setOperand(0, CompareVal);
6762 // Was the old condition true if the operand is positive?
6763 bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
6765 // If so, the new one isn't.
6766 isTrueIfPositive ^= true;
6768 if (isTrueIfPositive)
6769 return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
6772 return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
6776 if (LHSI->hasOneUse()) {
6777 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
6778 if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
6779 const APInt &SignBit = XorCST->getValue();
6780 ICmpInst::Predicate Pred = ICI.isSigned()
6781 ? ICI.getUnsignedPredicate()
6782 : ICI.getSignedPredicate();
6783 return new ICmpInst(Pred, LHSI->getOperand(0),
6784 ConstantInt::get(*Context, RHSV ^ SignBit));
6787 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
6788 if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
6789 const APInt &NotSignBit = XorCST->getValue();
6790 ICmpInst::Predicate Pred = ICI.isSigned()
6791 ? ICI.getUnsignedPredicate()
6792 : ICI.getSignedPredicate();
6793 Pred = ICI.getSwappedPredicate(Pred);
6794 return new ICmpInst(Pred, LHSI->getOperand(0),
6795 ConstantInt::get(*Context, RHSV ^ NotSignBit));
6800 case Instruction::And: // (icmp pred (and X, AndCST), RHS)
6801 if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
6802 LHSI->getOperand(0)->hasOneUse()) {
6803 ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
6805 // If the LHS is an AND of a truncating cast, we can widen the
6806 // and/compare to be the input width without changing the value
6807 // produced, eliminating a cast.
6808 if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
6809 // We can do this transformation if either the AND constant does not
6810 // have its sign bit set or if it is an equality comparison.
6811 // Extending a relational comparison when we're checking the sign
6812 // bit would not work.
6813 if (Cast->hasOneUse() &&
6814 (ICI.isEquality() ||
6815 (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
6817 cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
6818 APInt NewCST = AndCST->getValue();
6819 NewCST.zext(BitWidth);
6821 NewCI.zext(BitWidth);
6823 Builder->CreateAnd(Cast->getOperand(0),
6824 ConstantInt::get(*Context, NewCST), LHSI->getName());
6825 return new ICmpInst(ICI.getPredicate(), NewAnd,
6826 ConstantInt::get(*Context, NewCI));
6830 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
6831 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
6832 // happens a LOT in code produced by the C front-end, for bitfield accesses.
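      // Illustrative bitfield example (hypothetical constants):
      //   ((X >> 3) & 15) == 2  -->  (X & (15 << 3)) == (2 << 3)
      // i.e. (X & 120) == 16, leaving the shift dead.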
6834 BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
6835 if (Shift && !Shift->isShift())
6839 ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
6840 const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
6841 const Type *AndTy = AndCST->getType(); // Type of the and.
6843 // We can fold this as long as we can't shift unknown bits
6844 // into the mask. This can only happen with signed right
6845 // shifts, as they sign-extend.
6847 bool CanFold = Shift->isLogicalShift();
6849 // To test for the bad case of the signed shr, see if any
6850 // of the bits shifted in could be tested after the mask.
6851 uint32_t TyBits = Ty->getPrimitiveSizeInBits();
6852 int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
6854 uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
6855 if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
6856 AndCST->getValue()) == 0)
6862 if (Shift->getOpcode() == Instruction::Shl)
6863 NewCst = ConstantExpr::getLShr(RHS, ShAmt);
6865 NewCst = ConstantExpr::getShl(RHS, ShAmt);
6867 // Check to see if we are shifting out any of the bits being compared.
6869 if (ConstantExpr::get(Shift->getOpcode(),
6870 NewCst, ShAmt) != RHS) {
6871 // If we shifted bits out, the fold is not going to work out.
6872 // As a special case, check to see if this means that the
6873 // result is always true or false now.
6874 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
6875 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6876 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
6877 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6879 ICI.setOperand(1, NewCst);
6880 Constant *NewAndCST;
6881 if (Shift->getOpcode() == Instruction::Shl)
6882 NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
6884 NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
6885 LHSI->setOperand(1, NewAndCST);
6886 LHSI->setOperand(0, Shift->getOperand(0));
6887 Worklist.Add(Shift); // Shift is dead.
6893 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The latter is
6894 // preferable because it allows the C<<Y expression to be hoisted out
6895 // of a loop if Y is invariant and X is not.
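      // Illustrative: inside a loop where Y is invariant,
      //   ((X >> Y) & 1) == 0  -->  (X & (1 << Y)) == 0
      // lets the (1 << Y) computation be hoisted out of the loop.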
6896 if (Shift && Shift->hasOneUse() && RHSV == 0 &&
6897 ICI.isEquality() && !Shift->isArithmeticShift() &&
6898 !isa<Constant>(Shift->getOperand(0))) {
6901 if (Shift->getOpcode() == Instruction::LShr) {
6902 NS = Builder->CreateShl(AndCST, Shift->getOperand(1), "tmp");
6904 // Insert a logical shift.
6905 NS = Builder->CreateLShr(AndCST, Shift->getOperand(1), "tmp");
6908 // Compute X & (C << Y).
6910 Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());
6912 ICI.setOperand(0, NewAnd);
6918 case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
6919 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6922 uint32_t TypeBits = RHSV.getBitWidth();
6924 // Check that the shift amount is in range. If not, don't perform
6925 // undefined shifts. When the shift is visited it will be simplified.
6927 if (ShAmt->uge(TypeBits))
6930 if (ICI.isEquality()) {
6931 // If we are comparing against bits always shifted out, the
6932 // comparison cannot succeed.
6934 ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
6936 if (Comp != RHS) {// Comparing against a bit that we know is zero.
6937 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6938 Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
6939 return ReplaceInstUsesWith(ICI, Cst);
6942 if (LHSI->hasOneUse()) {
6943 // Otherwise strength reduce the shift into an and.
6944 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6946 ConstantInt::get(*Context, APInt::getLowBitsSet(TypeBits,
6947 TypeBits-ShAmtVal));
6950 Builder->CreateAnd(LHSI->getOperand(0),Mask, LHSI->getName()+".mask");
6951 return new ICmpInst(ICI.getPredicate(), And,
6952 ConstantInt::get(*Context, RHSV.lshr(ShAmtVal)));
6956 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
6957 bool TrueIfSigned = false;
6958 if (LHSI->hasOneUse() &&
6959 isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
6960 // (X << 31) <s 0 --> (X&1) != 0
6961 Constant *Mask = ConstantInt::get(*Context, APInt(TypeBits, 1) <<
6962 (TypeBits-ShAmt->getZExtValue()-1));
6964 Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
6965 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
6966 And, Constant::getNullValue(And->getType()));
6971 case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
6972 case Instruction::AShr: {
6973 // Only handle equality comparisons of shift-by-constant.
6974 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6975 if (!ShAmt || !ICI.isEquality()) break;
6977 // Check that the shift amount is in range. If not, don't perform
6978 // undefined shifts. When the shift is visited it will be simplified.
6980 uint32_t TypeBits = RHSV.getBitWidth();
6981 if (ShAmt->uge(TypeBits))
6984 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6986 // If we are comparing against bits always shifted out, the
6987 // comparison cannot succeed.
6988 APInt Comp = RHSV << ShAmtVal;
6989 if (LHSI->getOpcode() == Instruction::LShr)
6990 Comp = Comp.lshr(ShAmtVal);
6992 Comp = Comp.ashr(ShAmtVal);
6994 if (Comp != RHSV) { // Comparing against a bit that we know is zero.
6995 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6996 Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
6997 return ReplaceInstUsesWith(ICI, Cst);
7000 // Otherwise, check to see if the bits shifted out are known to be zero.
7001 // If so, we can compare against the unshifted value:
7002 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
7003 if (LHSI->hasOneUse() &&
7004 MaskedValueIsZero(LHSI->getOperand(0),
7005 APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
7006 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
7007 ConstantExpr::getShl(RHS, ShAmt));
7010 if (LHSI->hasOneUse()) {
7011 // Otherwise strength reduce the shift into an and.
7012 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
7013 Constant *Mask = ConstantInt::get(*Context, Val);
7015 Value *And = Builder->CreateAnd(LHSI->getOperand(0),
7016 Mask, LHSI->getName()+".mask");
7017 return new ICmpInst(ICI.getPredicate(), And,
7018 ConstantExpr::getShl(RHS, ShAmt));
7023 case Instruction::SDiv:
7024 case Instruction::UDiv:
7025 // Fold: icmp pred ([us]div X, C1), C2 -> range test
7026 // Fold this div into the comparison, producing a range check.
7027 // Determine, based on the divide type, what the range is being
7028 // checked. If there is an overflow on the low or high side, remember
7029 // it, otherwise compute the range [low, hi) bounding the new value.
7030 // See: InsertRangeTest above for the kinds of replacements possible.
7031 if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
7032 if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
7037 case Instruction::Add:
7038 // Fold: icmp pred (add X, C1), C2
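    // Illustrative (hypothetical i8 constants): "icmp ult (add i8 X, 5), 5"
    // is the wrapped range test X in [-5, 0), i.e. "icmp uge i8 X, -5".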
7040 if (!ICI.isEquality()) {
7041 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
7043 const APInt &LHSV = LHSC->getValue();
7045 ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
7048 if (ICI.isSigned()) {
7049 if (CR.getLower().isSignBit()) {
7050 return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
7051 ConstantInt::get(*Context, CR.getUpper()));
7052 } else if (CR.getUpper().isSignBit()) {
7053 return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
7054 ConstantInt::get(*Context, CR.getLower()));
7057 if (CR.getLower().isMinValue()) {
7058 return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
7059 ConstantInt::get(*Context, CR.getUpper()));
7060 } else if (CR.getUpper().isMinValue()) {
7061 return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
7062 ConstantInt::get(*Context, CR.getLower()));
7069 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
7070 if (ICI.isEquality()) {
7071 bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
7073 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
7074 // the second operand is a constant, simplify a bit.
7075 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
7076 switch (BO->getOpcode()) {
7077 case Instruction::SRem:
7078 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
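        // Illustrative: "(X srem 8) == 0" and "(X urem 8) == 0" agree, since
        // both just test that the low three bits of X are zero.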
7079 if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
7080 const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
7081 if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
7083 Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
7085 return new ICmpInst(ICI.getPredicate(), NewRem,
7086 Constant::getNullValue(BO->getType()));
7090 case Instruction::Add:
7091 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
7092 if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7093 if (BO->hasOneUse())
7094 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7095 ConstantExpr::getSub(RHS, BOp1C));
7096 } else if (RHSV == 0) {
7097 // Replace ((add A, B) != 0) with (A != -B) if A or B is
7098 // efficiently invertible, or if the add has just this one use.
7099 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
7101 if (Value *NegVal = dyn_castNegVal(BOp1))
7102 return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
7103 else if (Value *NegVal = dyn_castNegVal(BOp0))
7104 return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
7105 else if (BO->hasOneUse()) {
7106 Value *Neg = Builder->CreateNeg(BOp1);
7108 return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
7112 case Instruction::Xor:
7113 // For the xor case, we can xor two constants together, eliminating
7114 // the explicit xor.
7115 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
7116 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7117 ConstantExpr::getXor(RHS, BOC));
7120 case Instruction::Sub:
7121 // Replace (([sub|xor] A, B) != 0) with (A != B)
7123 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7127 case Instruction::Or:
7128 // If bits are being or'd in that are not present in the constant we
7129 // are comparing against, then the comparison could never succeed!
7130 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
7131 Constant *NotCI = ConstantExpr::getNot(RHS);
7132 if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
7133 return ReplaceInstUsesWith(ICI,
7134 ConstantInt::get(Type::getInt1Ty(*Context),
7139 case Instruction::And:
7140 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7141 // If bits are being compared against that are and'd out, then the
7142 // comparison can never succeed!
7143 if ((RHSV & ~BOC->getValue()) != 0)
7144 return ReplaceInstUsesWith(ICI,
7145 ConstantInt::get(Type::getInt1Ty(*Context),
7148 // If we have ((X & C) == C), turn it into ((X & C) != 0).
7149 if (RHS == BOC && RHSV.isPowerOf2())
7150 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
7151 ICmpInst::ICMP_NE, LHSI,
7152 Constant::getNullValue(RHS->getType()));
7154 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
7155 if (BOC->getValue().isSignBit()) {
7156 Value *X = BO->getOperand(0);
7157 Constant *Zero = Constant::getNullValue(X->getType());
7158 ICmpInst::Predicate pred = isICMP_NE ?
7159 ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
7160 return new ICmpInst(pred, X, Zero);
7163 // ((X & ~7) == 0) --> X < 8
7164 if (RHSV == 0 && isHighOnes(BOC)) {
7165 Value *X = BO->getOperand(0);
7166 Constant *NegX = ConstantExpr::getNeg(BOC);
7167 ICmpInst::Predicate pred = isICMP_NE ?
7168 ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
7169 return new ICmpInst(pred, X, NegX);
7174 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
7175 // Handle icmp {eq|ne} <intrinsic>, intcst.
7176 if (II->getIntrinsicID() == Intrinsic::bswap) {
7178 ICI.setOperand(0, II->getOperand(1));
7179 ICI.setOperand(1, ConstantInt::get(*Context, RHSV.byteSwap()));
7187 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
7188 /// We only handle extending casts so far.
7190 Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
7191 const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
7192 Value *LHSCIOp = LHSCI->getOperand(0);
7193 const Type *SrcTy = LHSCIOp->getType();
7194 const Type *DestTy = LHSCI->getType();
7197 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
7198 // integer type is the same size as the pointer type.
7199 if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
7200 TD->getPointerSizeInBits() ==
7201 cast<IntegerType>(DestTy)->getBitWidth()) {
7203 if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
7204 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
7205 } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
7206 RHSOp = RHSC->getOperand(0);
7207 // If the pointer types don't match, insert a bitcast.
7208 if (LHSCIOp->getType() != RHSOp->getType())
7209 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
7213 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
7216 // The code below only handles extension cast instructions, so far.
7218 if (LHSCI->getOpcode() != Instruction::ZExt &&
7219 LHSCI->getOpcode() != Instruction::SExt)
7222 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
7223 bool isSignedCmp = ICI.isSigned();
7225 if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
7226 // Not an extension from the same type?
7227 RHSCIOp = CI->getOperand(0);
7228 if (RHSCIOp->getType() != LHSCIOp->getType())
7231 // If the signedness of the two casts doesn't agree (i.e. one is a sext
7232 // and the other is a zext), then we can't handle this.
7233 if (CI->getOpcode() != LHSCI->getOpcode())
7236 // Deal with equality cases early.
7237 if (ICI.isEquality())
7238 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
7240 // A signed comparison of sign extended values simplifies into a
7241 // signed comparison.
7242 if (isSignedCmp && isSignedExt)
7243 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
7245 // The other three cases all fold into an unsigned comparison.
7246 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
7249 // If we aren't dealing with a constant on the RHS, exit early
7250 ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
7254 // Compute the constant that would happen if we truncated to SrcTy then
7255 // reextended to DestTy.
7256 Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
7257 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
7260 // If the re-extended constant didn't change...
7262 // Make sure that sign of the Cmp and the sign of the Cast are the same.
7263 // For example, we might have:
7264 // %A = sext i16 %X to i32
7265 // %B = icmp ugt i32 %A, 1330
7266 // It is incorrect to transform this into
7267 // %B = icmp ugt i16 %X, 1330
7268 // because %A may have a negative value.
7270 // However, we allow this when the compare is EQ/NE, because they are
7272 if (isSignedExt == isSignedCmp || ICI.isEquality())
7273 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
7277 // The re-extended constant changed so the constant cannot be represented
7278 // in the shorter type. Consequently, we cannot emit a simple comparison.
7280 // First, handle some easy cases. We know the result cannot be equal at this
7281 // point so handle the ICI.isEquality() cases
7282 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
7283 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
7284 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
7285 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
7287 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
7288 // should have been folded away previously and should not reach here.
7291 // We're performing a signed comparison.
7292 if (cast<ConstantInt>(CI)->getValue().isNegative())
7293 Result = ConstantInt::getFalse(*Context); // X < (small) --> false
7295 Result = ConstantInt::getTrue(*Context); // X < (large) --> true
7297 // We're performing an unsigned comparison.
7299 // We're performing an unsigned comp with a sign extended value.
7300 // This is true if the input is >= 0. [aka >s -1]
7301 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
7302 Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
7304 // Unsigned extend & unsigned compare -> always true.
7305 Result = ConstantInt::getTrue(*Context);
7309 // Finally, return the value computed.
7310 if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
7311 ICI.getPredicate() == ICmpInst::ICMP_SLT)
7312 return ReplaceInstUsesWith(ICI, Result);
7314 assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
7315 ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
7316 "ICmp should be folded!");
7317 if (Constant *CI = dyn_cast<Constant>(Result))
7318 return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
7319 return BinaryOperator::CreateNot(Result);
7322 Instruction *InstCombiner::visitShl(BinaryOperator &I) {
7323 return commonShiftTransforms(I);
7326 Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
7327 return commonShiftTransforms(I);
7330 Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
7331 if (Instruction *R = commonShiftTransforms(I))
7334 Value *Op0 = I.getOperand(0);
7337 // ashr int -1, X = -1 (for any arithmetic right shift of ~0)
7337 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
7338 if (CSI->isAllOnesValue())
7339 return ReplaceInstUsesWith(I, CSI);
7341 // See if we can turn a signed shr into an unsigned shr.
7342 if (MaskedValueIsZero(Op0,
7343 APInt::getSignBit(I.getType()->getScalarSizeInBits())))
7344 return BinaryOperator::CreateLShr(Op0, I.getOperand(1));
7346 // Arithmetic shifting an all-sign-bit value is a no-op.
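  // For example (illustrative): if Op0 is "sext i1 %b to i32", every bit is a
  // copy of the sign bit, so an in-range ashr of it returns Op0 unchanged.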
7347 unsigned NumSignBits = ComputeNumSignBits(Op0);
7348 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
7349 return ReplaceInstUsesWith(I, Op0);
7354 Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
7355 assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
7356 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7358 // shl X, 0 == X and shr X, 0 == X
7359 // shl 0, X == 0 and shr 0, X == 0
7360 if (Op1 == Constant::getNullValue(Op1->getType()) ||
7361 Op0 == Constant::getNullValue(Op0->getType()))
7362 return ReplaceInstUsesWith(I, Op0);
7364 if (isa<UndefValue>(Op0)) {
7365 if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
7366 return ReplaceInstUsesWith(I, Op0);
7367 else // undef << X -> 0, undef >>u X -> 0
7368 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7370 if (isa<UndefValue>(Op1)) {
7371 if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
7372 return ReplaceInstUsesWith(I, Op0);
7373 else // X << undef, X >>u undef -> 0
7374 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7377 // See if we can fold away this shift.
7378 if (SimplifyDemandedInstructionBits(I))
7381 // Try to fold constant and into select arguments.
7382 if (isa<Constant>(Op0))
7383 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
7384 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7387 if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
7388 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
7393 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
7394 BinaryOperator &I) {
7395 bool isLeftShift = I.getOpcode() == Instruction::Shl;
7397 // See if we can simplify any instructions used by the instruction whose sole
7398 // purpose is to compute bits we don't care about.
7399 uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
7401 // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate
7404 if (Op1->uge(TypeBits)) {
7405 if (I.getOpcode() != Instruction::AShr)
7406 return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
7408 I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
7413 // ((X*C1) << C2) == (X * (C1 << C2))
7414 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
7415 if (BO->getOpcode() == Instruction::Mul && isLeftShift)
7416 if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
7417 return BinaryOperator::CreateMul(BO->getOperand(0),
7418 ConstantExpr::getShl(BOOp, Op1));
7420 // Try to fold constant and into select arguments.
7421 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
7422 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7424 if (isa<PHINode>(Op0))
7425 if (Instruction *NV = FoldOpIntoPhi(I))
7428 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
7429 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
7430 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
7431 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
7432 // place. Don't try to do this transformation in this case. Also, we
7433 // require that the input operand is a shift-by-constant so that we have
7434 // confidence that the shifts will get folded together. We could do this
7435 // xform in more cases, but it is unlikely to be profitable.
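    // Illustrative sketch (i32 -> i16, hypothetical constants):
    //   lshr (trunc i32 (lshr X, 4) to i16), 2
    //     --> trunc (and i32 (lshr (lshr X, 4), 2), 0x3FFF) to i16
    // where the 0x3FFF mask reproduces the zeros the trunc would have created.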
7436 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
7437 isa<ConstantInt>(TrOp->getOperand(1))) {
7438 // Okay, we'll do this xform. Make the shift of shift.
7439 Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
7440 // (shift2 (shift1 & 0x00FF), c2)
7441 Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
7443 // For logical shifts, the truncation has the effect of making the high
7444 // part of the register be zeros. Emulate this by inserting an AND to
7445 // clear the top bits as needed. This 'and' will usually be zapped by
7446 // other xforms later if dead.
7447 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
7448 unsigned DstSize = TI->getType()->getScalarSizeInBits();
7449 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
7451 // The mask we constructed says what the trunc would do if occurring
7452 // between the shifts. We want to know the effect *after* the second
7453 // shift. We know that it is a logical shift by a constant, so adjust the
7454 // mask as appropriate.
7455 if (I.getOpcode() == Instruction::Shl)
7456 MaskV <<= Op1->getZExtValue();
7458 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
7459 MaskV = MaskV.lshr(Op1->getZExtValue());
7463 Value *And = Builder->CreateAnd(NSh, ConstantInt::get(*Context, MaskV),
7466 // Return the value truncated to the interesting size.
7467 return new TruncInst(And, I.getType());
7471 if (Op0->hasOneUse()) {
7472 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
7473 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
7476 switch (Op0BO->getOpcode()) {
7478 case Instruction::Add:
7479 case Instruction::And:
7480 case Instruction::Or:
7481 case Instruction::Xor: {
7482 // These operators commute.
7483 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
7484 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
7485 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
7486 m_Specific(Op1)))) {
7487 Value *YS = // (Y << C)
7488 Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
7490 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
7491 Op0BO->getOperand(1)->getName());
7492 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
7493 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
7494 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
7497 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
7498 Value *Op0BOOp1 = Op0BO->getOperand(1);
7499 if (isLeftShift && Op0BOOp1->hasOneUse() &&
7501 m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
7502 m_ConstantInt(CC))) &&
7503 cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
7504 Value *YS = // (Y << C)
7505 Builder->CreateShl(Op0BO->getOperand(0), Op1,
7508 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
7509 V1->getName()+".mask");
7510 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
7515 case Instruction::Sub: {
7516 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
7517 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
7518 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
7519 m_Specific(Op1)))) {
7520 Value *YS = // (Y << C)
7521 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
7523 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
7524 Op0BO->getOperand(0)->getName());
7525 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
7526 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
7527 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
7530 // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
7531 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
7532 match(Op0BO->getOperand(0),
7533 m_And(m_Shr(m_Value(V1), m_Value(V2)),
7534 m_ConstantInt(CC))) && V2 == Op1 &&
7535 cast<BinaryOperator>(Op0BO->getOperand(0))
7536 ->getOperand(0)->hasOneUse()) {
7537 Value *YS = // (Y << C)
7538 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
7540 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
7541 V1->getName()+".mask");
7543 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
7551 // If the operand is a bitwise operator with a constant RHS, and the
7552 // shift is the only use, we can pull it out of the shift.
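    // Illustrative (hypothetical constants): "shl (or X, 3), 4" becomes
    // "or (shl X, 4), 48", moving the bitwise operator's constant past the shift.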
7553 if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
7554 bool isValid = true; // Valid only for And, Or, Xor
7555 bool highBitSet = false; // Transform if high bit of constant set?
7557 switch (Op0BO->getOpcode()) {
7558 default: isValid = false; break; // Do not perform transform!
7559 case Instruction::Add:
7560 isValid = isLeftShift;
7562 case Instruction::Or:
7563 case Instruction::Xor:
7566 case Instruction::And:
7571 // If this is a signed shift right, and the high bit is modified
7572 // by the logical operation, do not perform the transformation.
7573 // The highBitSet boolean indicates the value of the high bit of
7574 // the constant which would cause it to be modified for this operation.
7577 if (isValid && I.getOpcode() == Instruction::AShr)
7578 isValid = Op0C->getValue()[TypeBits-1] == highBitSet;
7581 Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
7584 Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
7585 NewShift->takeName(Op0BO);
7587 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
7594 // Find out if this is a shift of a shift by a constant.
7595 BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
7596 if (ShiftOp && !ShiftOp->isShift())
7599 if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
7600 ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
7601 uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
7602 uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
7603 assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
7604 if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
7605 Value *X = ShiftOp->getOperand(0);
7607 uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
7609 const IntegerType *Ty = cast<IntegerType>(I.getType());
7611 // Check for (X << c1) << c2 and (X >> c1) >> c2
7612 if (I.getOpcode() == ShiftOp->getOpcode()) {
7613 // If this is an oversized composite shift, then unsigned shifts get 0 and ashr saturates.
7615 if (AmtSum >= TypeBits) {
7616 if (I.getOpcode() != Instruction::AShr)
7617 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7618 AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
7621 return BinaryOperator::Create(I.getOpcode(), X,
7622 ConstantInt::get(Ty, AmtSum));
7625 if (ShiftOp->getOpcode() == Instruction::LShr &&
7626 I.getOpcode() == Instruction::AShr) {
7627 if (AmtSum >= TypeBits)
7628 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7630 // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
7631 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
7634 if (ShiftOp->getOpcode() == Instruction::AShr &&
7635 I.getOpcode() == Instruction::LShr) {
7636 // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
7637 if (AmtSum >= TypeBits)
7638 AmtSum = TypeBits-1;
7640 Value *Shift = Builder->CreateAShr(X, ConstantInt::get(Ty, AmtSum));
7642 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7643 return BinaryOperator::CreateAnd(Shift, ConstantInt::get(*Context, Mask));
7646 // Okay, if we get here, one shift must be left, and the other shift must be
7647 // right. See if the amounts are equal.
7648 if (ShiftAmt1 == ShiftAmt2) {
7649 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
7650 if (I.getOpcode() == Instruction::Shl) {
7651 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
7652 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask));
7654 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
7655 if (I.getOpcode() == Instruction::LShr) {
7656 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
7657 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask));
7659 // We can simplify ((X << C) >>s C) into a trunc + sext.
7660 // NOTE: we could do this for any C, but that would make 'unusual' integer
7661 // types. For now, just stick to ones well-supported by the code generators.
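    // Illustrative: for i32, "(X << 24) >>s 24" becomes
    //   sext (trunc X to i8) to i32
    // which sign-extends the low 8 bits of X.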
7663 const Type *SExtType = 0;
7664 switch (Ty->getBitWidth() - ShiftAmt1) {
7671 SExtType = IntegerType::get(*Context, Ty->getBitWidth() - ShiftAmt1);
7676 return new SExtInst(Builder->CreateTrunc(X, SExtType, "sext"), Ty);
7677 // Otherwise, we can't handle it yet.
7678 } else if (ShiftAmt1 < ShiftAmt2) {
7679 uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
7681 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
7682 if (I.getOpcode() == Instruction::Shl) {
7683 assert(ShiftOp->getOpcode() == Instruction::LShr ||
7684 ShiftOp->getOpcode() == Instruction::AShr);
7685 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
7687 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
7688 return BinaryOperator::CreateAnd(Shift,
7689 ConstantInt::get(*Context, Mask));
7692 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
7693 if (I.getOpcode() == Instruction::LShr) {
7694 assert(ShiftOp->getOpcode() == Instruction::Shl);
7695 Value *Shift = Builder->CreateLShr(X, ConstantInt::get(Ty, ShiftDiff));
7697 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7698 return BinaryOperator::CreateAnd(Shift,
7699 ConstantInt::get(*Context, Mask));
7702 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
7704 assert(ShiftAmt2 < ShiftAmt1);
7705 uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
7707 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
7708 if (I.getOpcode() == Instruction::Shl) {
7709 assert(ShiftOp->getOpcode() == Instruction::LShr ||
7710 ShiftOp->getOpcode() == Instruction::AShr);
7711 Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(), X,
7712 ConstantInt::get(Ty, ShiftDiff));
7714 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
7715 return BinaryOperator::CreateAnd(Shift,
7716 ConstantInt::get(*Context, Mask));
7719 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
7720 if (I.getOpcode() == Instruction::LShr) {
7721 assert(ShiftOp->getOpcode() == Instruction::Shl);
7722 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
7724 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7725 return BinaryOperator::CreateAnd(Shift,
7726 ConstantInt::get(*Context, Mask));
7729 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
7736 /// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
7737 /// expression. If so, decompose it, returning some value X, such that Val is X*Scale+Offset.
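/// For illustration (hypothetical IR): "add (shl %n, 2), 12" decomposes to
/// X = %n with Scale = 4 and Offset = 12, i.e. the value is %n*4 + 12.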
7740 static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
7741 int &Offset, LLVMContext *Context) {
7742 assert(Val->getType() == Type::getInt32Ty(*Context) &&
7743 "Unexpected allocation size type!");
7744 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
7745 Offset = CI->getZExtValue();
7747 return ConstantInt::get(Type::getInt32Ty(*Context), 0);
7748 } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
7749 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
7750 if (I->getOpcode() == Instruction::Shl) {
7751 // This is a value scaled by '1 << the shift amt'.
7752 Scale = 1U << RHS->getZExtValue();
7754 return I->getOperand(0);
7755 } else if (I->getOpcode() == Instruction::Mul) {
7756 // This value is scaled by 'RHS'.
7757 Scale = RHS->getZExtValue();
7759 return I->getOperand(0);
7760 } else if (I->getOpcode() == Instruction::Add) {
7761 // We have X+C. Check to see if we really have (X*C2)+C1,
7762 // where C1 is divisible by C2.
7765 DecomposeSimpleLinearExpr(I->getOperand(0), SubScale,
7767 Offset += RHS->getZExtValue();
7774 // Otherwise, we can't look past this.
7781 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
7782 /// try to eliminate the cast by moving the type information into the alloc.
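/// For illustration (assuming the size and alignment checks below pass): a
/// bitcast of "alloca [8 x i8]" to i32* lets the alloca itself be rewritten
/// as "alloca i32, i32 2".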
7783 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
7785 const PointerType *PTy = cast<PointerType>(CI.getType());
7787 BuilderTy AllocaBuilder(*Builder);
7788 AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
7790 // Remove any uses of AI that are dead.
7791 assert(!CI.use_empty() && "Dead instructions should be removed earlier!");
7793 for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
7794 Instruction *User = cast<Instruction>(*UI++);
7795 if (isInstructionTriviallyDead(User)) {
7796 while (UI != E && *UI == User)
7797 ++UI; // If this instruction uses AI more than once, don't break UI.
7800 DEBUG(errs() << "IC: DCE: " << *User << '\n');
7801 EraseInstFromFunction(*User);
7805 // This requires TargetData to get the alloca alignment and size information.
7808 // Get the type really allocated and the type casted to.
7809 const Type *AllocElTy = AI.getAllocatedType();
7810 const Type *CastElTy = PTy->getElementType();
7811 if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
7813 unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
7814 unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
7815 if (CastElTyAlign < AllocElTyAlign) return 0;
7817 // If the allocation has multiple uses, only promote it if we are strictly
7818 // increasing the alignment of the resultant allocation. If we keep it the
7819 // same, we open the door to infinite loops of various kinds. (A reference
7820 // from a dbg.declare doesn't count as a use for this purpose.)
7821 if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
7822 CastElTyAlign == AllocElTyAlign) return 0;
7824 uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
7825 uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
7826 if (CastElTySize == 0 || AllocElTySize == 0) return 0;
7828 // See if we can satisfy the modulus by pulling a scale out of the array size argument.
7830 unsigned ArraySizeScale;
7832 Value *NumElements = // See if the array size is a decomposable linear expr.
7833 DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale,
7834 ArrayOffset, Context);
7836 // If we can now satisfy the modulus, by using a non-1 scale, we really can do the transformation.
7838 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
7839 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
7841 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
7846 Amt = ConstantInt::get(Type::getInt32Ty(*Context), Scale);
7847 // Insert before the alloca, not before the cast.
7848 Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
7851 if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
7852 Value *Off = ConstantInt::get(Type::getInt32Ty(*Context), Offset, true);
7853 Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
7856 AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
7857 New->setAlignment(AI.getAlignment());
7860 // If the allocation has one real use plus a dbg.declare, just remove the declare.
7862 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
7863 EraseInstFromFunction(*DI);
7865 // If the allocation has multiple real uses, insert a cast and change all
7866 // things that used it to use the new cast. This will also hack on CI, but it will die soon.
7868 else if (!AI.hasOneUse()) {
7869 // New is the allocation instruction, pointer typed. AI is the original
7870 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
7871 Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
7872 AI.replaceAllUsesWith(NewCast);
7874 return ReplaceInstUsesWith(CI, New);
7877 /// CanEvaluateInDifferentType - Return true if we can take the specified value
7878 /// and return it as type Ty without inserting any new casts and without
7879 /// changing the computed value. This is used by code that tries to decide
7880 /// whether promoting or shrinking integer operations to wider or smaller types
7881 /// will allow us to eliminate a truncate or extend.
7883 /// This is a truncation operation if Ty is smaller than V->getType(), or an
7884 /// extension operation if Ty is larger.
7886 /// If CastOpc is a truncation, then Ty will be a type smaller than V. We
7887 /// should return true if trunc(V) can be computed by computing V in the smaller
7888 /// type. If V is an instruction, then trunc(inst(x,y)) can be computed as
7889 /// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
7890 /// efficiently truncated.
7892 /// If CastOpc is a sext or zext, we are asking if the low bits of the value can
7893 /// be computed in a larger type, which is then and'd or sext_in_reg'd to get
7894 /// the final result.
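/// For illustration: "trunc (add i32 %x, %y) to i16" can instead be computed
/// as "add i16 (trunc %x), (trunc %y)", so Add is accepted when both operands
/// can themselves be evaluated in the narrower type.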
7895 bool InstCombiner::CanEvaluateInDifferentType(Value *V, const Type *Ty,
7897 int &NumCastsRemoved){
7898 // We can always evaluate constants in another type.
7899 if (isa<Constant>(V))
7902 Instruction *I = dyn_cast<Instruction>(V);
7903 if (!I) return false;
7905 const Type *OrigTy = V->getType();
7907 // If this is an extension or truncate, we can often eliminate it.
7908 if (isa<TruncInst>(I) || isa<ZExtInst>(I) || isa<SExtInst>(I)) {
7909 // If this is a cast from the destination type, we can trivially eliminate
7910 // it, and this will remove a cast overall.
7911 if (I->getOperand(0)->getType() == Ty) {
7912 // If the first operand is itself a cast, and is eliminable, do not count
7913 // this as an eliminable cast. We would prefer to eliminate those two casts first.
7915 if (!isa<CastInst>(I->getOperand(0)) && I->hasOneUse())
7921 // We can't extend or shrink something that has multiple uses: doing so would
7922 // require duplicating the instruction in general, which isn't profitable.
7923 if (!I->hasOneUse()) return false;
7925 unsigned Opc = I->getOpcode();
7927 case Instruction::Add:
7928 case Instruction::Sub:
7929 case Instruction::Mul:
7930 case Instruction::And:
7931 case Instruction::Or:
7932 case Instruction::Xor:
7933 // These operators can all arbitrarily be extended or truncated.
7934 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7936 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
7939 case Instruction::UDiv:
7940 case Instruction::URem: {
7941 // UDiv and URem can be truncated if all the truncated bits are zero.
7942 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
7943 uint32_t BitWidth = Ty->getScalarSizeInBits();
7944 if (BitWidth < OrigBitWidth) {
7945 APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
7946 if (MaskedValueIsZero(I->getOperand(0), Mask) &&
7947 MaskedValueIsZero(I->getOperand(1), Mask)) {
7948 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7950 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
7956 case Instruction::Shl:
7957 // If we are truncating the result of this SHL, and if it's a shift of a
7958 // constant amount, we can always perform a SHL in a smaller type.
7959 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7960 uint32_t BitWidth = Ty->getScalarSizeInBits();
7961 if (BitWidth < OrigTy->getScalarSizeInBits() &&
7962 CI->getLimitedValue(BitWidth) < BitWidth)
7963 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7967 case Instruction::LShr:
7968 // If this is a truncate of a logical shr, we can truncate it to a smaller
7969 // lshr iff we know that the bits we would otherwise be shifting in are
7970 // already zeros.
7971 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7972 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
7973 uint32_t BitWidth = Ty->getScalarSizeInBits();
7974 if (BitWidth < OrigBitWidth &&
7975 MaskedValueIsZero(I->getOperand(0),
7976 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
7977 CI->getLimitedValue(BitWidth) < BitWidth) {
7978 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7983 case Instruction::ZExt:
7984 case Instruction::SExt:
7985 case Instruction::Trunc:
7986 // If this is the same kind of case as our original (e.g. zext+zext), we
7987 // can safely replace it. Note that replacing it does not reduce the number
7988 // of casts in the input.
7992 // sext (zext ty1), ty2 -> zext ty2
7993 if (CastOpc == Instruction::SExt && Opc == Instruction::ZExt)
7996 case Instruction::Select: {
7997 SelectInst *SI = cast<SelectInst>(I);
7998 return CanEvaluateInDifferentType(SI->getTrueValue(), Ty, CastOpc,
8000 CanEvaluateInDifferentType(SI->getFalseValue(), Ty, CastOpc,
8003 case Instruction::PHI: {
8004 // We can change a phi if we can change all operands.
8005 PHINode *PN = cast<PHINode>(I);
8006 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
8007 if (!CanEvaluateInDifferentType(PN->getIncomingValue(i), Ty, CastOpc,
8013 // TODO: Can handle more cases here.
8020 /// EvaluateInDifferentType - Given an expression that
8021 /// CanEvaluateInDifferentType returns true for, actually insert the code to
8022 /// evaluate the expression.
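/// For illustration: asked to evaluate "add i32 (zext i8 %a to i32), 5" in
/// type i8, this emits "add i8 %a, 5" (constants are recast directly, and the
/// zext collapses because its source type already matches i8).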
8023 Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
8025 if (Constant *C = dyn_cast<Constant>(V))
8026 return ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
8028 // Otherwise, it must be an instruction.
8029 Instruction *I = cast<Instruction>(V);
8030 Instruction *Res = 0;
8031 unsigned Opc = I->getOpcode();
8033 case Instruction::Add:
8034 case Instruction::Sub:
8035 case Instruction::Mul:
8036 case Instruction::And:
8037 case Instruction::Or:
8038 case Instruction::Xor:
8039 case Instruction::AShr:
8040 case Instruction::LShr:
8041 case Instruction::Shl:
8042 case Instruction::UDiv:
8043 case Instruction::URem: {
8044 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
8045 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
8046 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
8049 case Instruction::Trunc:
8050 case Instruction::ZExt:
8051 case Instruction::SExt:
8052 // If the source type of the cast is the type we're trying for then we can
8053 // just return the source. There's no need to insert it because it is not
8054 // new.
8055 if (I->getOperand(0)->getType() == Ty)
8056 return I->getOperand(0);
8058 // Otherwise, must be the same type of cast, so just reinsert a new one.
8059 Res = CastInst::Create(cast<CastInst>(I)->getOpcode(), I->getOperand(0),Ty);
8061 case Instruction::Select: {
8062 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
8063 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
8064 Res = SelectInst::Create(I->getOperand(0), True, False);
8067 case Instruction::PHI: {
8068 PHINode *OPN = cast<PHINode>(I);
8069 PHINode *NPN = PHINode::Create(Ty);
8070 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
8071 Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
8072 NPN->addIncoming(V, OPN->getIncomingBlock(i));
8078 // TODO: Can handle more cases here.
8079 llvm_unreachable("Unreachable!");
8084 return InsertNewInstBefore(Res, *I);
8087 /// @brief Implement the transforms common to all CastInst visitors.
8088 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
8089 Value *Src = CI.getOperand(0);
8091 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
8092 // eliminate it now.
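// For example, "trunc i16 (trunc i32 %x to i16) to i8" folds to a single
// "trunc i32 %x to i8"; isEliminableCastPair decides which cast pairs may
// safely be merged this way.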
8093 if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
8094 if (Instruction::CastOps opc =
8095 isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
8096 // The first cast (CSrc) is eliminable so we need to fix up or replace
8097 // the second cast (CI). CSrc will then have a good chance of being dead.
8098 return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
8102 // If we are casting a select then fold the cast into the select
8103 if (SelectInst *SI = dyn_cast<SelectInst>(Src))
8104 if (Instruction *NV = FoldOpIntoSelect(CI, SI, this))
8107 // If we are casting a PHI then fold the cast into the PHI
8108 if (isa<PHINode>(Src)) {
8109 // We don't do this if it would create a PHI node with an illegal type when
8110 // the current type is legal.
8111 if (!isa<IntegerType>(Src->getType()) ||
8112 !isa<IntegerType>(CI.getType()) ||
8113 ShouldChangeType(CI.getType(), Src->getType(), TD))
8114 if (Instruction *NV = FoldOpIntoPhi(CI))
8121 /// FindElementAtOffset - Given a type and a constant offset, determine whether
8122 /// or not there is a sequence of GEP indices into the type that will land us at
8123 /// the specified offset. If so, fill them into NewIndices and return the
8124 /// resultant element type, otherwise return null.
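/// For example (assuming TD says i32 occupies 4 bytes): for Ty = { i32, i32 }
/// and Offset = 4, the indices [0, 1] land exactly on the second field, so
/// NewIndices is filled with {0, 1} and i32 is returned.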
8125 static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
8126 SmallVectorImpl<Value*> &NewIndices,
8127 const TargetData *TD,
8128 LLVMContext *Context) {
8130 if (!Ty->isSized()) return 0;
8132 // Start with the index over the outer type. Note that the type size
8133 // might be zero (even if the offset isn't zero) if the indexed type
8134 // is something like [0 x {int, int}]
8135 const Type *IntPtrTy = TD->getIntPtrType(*Context);
8136 int64_t FirstIdx = 0;
8137 if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
8138 FirstIdx = Offset/TySize;
8139 Offset -= FirstIdx*TySize;
8141 // Handle hosts where % returns negative instead of values [0..TySize).
8145 assert(Offset >= 0);
8147 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
8150 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
8152 // Index into the types. If we fail, set OrigBase to null.
8153 while (Offset) {
8154 // Indexing into tail padding between struct/array elements.
8155 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
8158 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
8159 const StructLayout *SL = TD->getStructLayout(STy);
8160 assert(Offset < (int64_t)SL->getSizeInBytes() &&
8161 "Offset must stay within the indexed type");
8163 unsigned Elt = SL->getElementContainingOffset(Offset);
8164 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Elt));
8166 Offset -= SL->getElementOffset(Elt);
8167 Ty = STy->getElementType(Elt);
8168 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
8169 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
8170 assert(EltSize && "Cannot index into a zero-sized array");
8171 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
8173 Ty = AT->getElementType();
8175 // Otherwise, we can't index into the middle of this atomic type, bail.
8183 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
8184 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
8185 Value *Src = CI.getOperand(0);
8187 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
8188 // If casting the result of a getelementptr instruction with no offset, turn
8189 // this into a cast of the original pointer!
8190 if (GEP->hasAllZeroIndices()) {
8191 // Changing the cast operand is usually not a good idea but it is safe
8192 // here because the pointer operand is being replaced with another
8193 // pointer operand so the opcode doesn't need to change.
8195 CI.setOperand(0, GEP->getOperand(0));
8199 // If the GEP has a single use, and the base pointer is a bitcast, and the
8200 // GEP computes a constant offset, see if we can convert these three
8201 // instructions into fewer. This typically happens with unions and other
8202 // non-type-safe code.
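// For illustration (%struct.Pair = { i32, i32 } and the value names are
// made up):
//   %raw = bitcast %struct.Pair* %p to i8*
//   %off = getelementptr i8* %raw, i32 4
//   %cst = bitcast i8* %off to i32*
// can be rebuilt as the typed "getelementptr %struct.Pair* %p, i32 0, i32 1"
// (plus a bitcast of the result if the element type still differs), making
// the leading bitcast dead.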
8203 if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
8204 if (GEP->hasAllConstantIndices()) {
8205 // We are guaranteed to get a constant from EmitGEPOffset.
8206 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP, *this));
8207 int64_t Offset = OffsetV->getSExtValue();
8209 // Get the base pointer input of the bitcast, and the type it points to.
8210 Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
8211 const Type *GEPIdxTy =
8212 cast<PointerType>(OrigBase->getType())->getElementType();
8213 SmallVector<Value*, 8> NewIndices;
8214 if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD, Context)) {
8215 // If we were able to index down into an element, create the GEP
8216 // and bitcast the result. This eliminates one bitcast, potentially
8217 // two.
8218 Value *NGEP = cast<GEPOperator>(GEP)->isInBounds() ?
8219 Builder->CreateInBoundsGEP(OrigBase,
8220 NewIndices.begin(), NewIndices.end()) :
8221 Builder->CreateGEP(OrigBase, NewIndices.begin(), NewIndices.end());
8222 NGEP->takeName(GEP);
8224 if (isa<BitCastInst>(CI))
8225 return new BitCastInst(NGEP, CI.getType());
8226 assert(isa<PtrToIntInst>(CI));
8227 return new PtrToIntInst(NGEP, CI.getType());
8233 return commonCastTransforms(CI);
8236 /// commonIntCastTransforms - This function implements the common transforms
8237 /// for trunc, zext, and sext.
8238 Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
8239 if (Instruction *Result = commonCastTransforms(CI))
8242 Value *Src = CI.getOperand(0);
8243 const Type *SrcTy = Src->getType();
8244 const Type *DestTy = CI.getType();
8245 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
8246 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
8248 // See if we can simplify any instructions used by the LHS whose sole
8249 // purpose is to compute bits we don't care about.
8250 if (SimplifyDemandedInstructionBits(CI))
8253 // If the source isn't an instruction or has more than one use then we
8254 // can't do anything more.
8255 Instruction *SrcI = dyn_cast<Instruction>(Src);
8256 if (!SrcI || !Src->hasOneUse())
8259 // Attempt to propagate the cast into the instruction for int->int casts.
8260 int NumCastsRemoved = 0;
8261 // Only do this if the dest type is a simple type, don't convert the
8262 // expression tree to something weird like i93 unless the source is also
8263 // strange.
8264 if ((isa<VectorType>(DestTy) ||
8265 ShouldChangeType(SrcI->getType(), DestTy, TD)) &&
8266 CanEvaluateInDifferentType(SrcI, DestTy,
8267 CI.getOpcode(), NumCastsRemoved)) {
8268 // If this cast is a truncate, evaluating in a different type always
8269 // eliminates the cast, so it is always a win. If this is a zero-extension,
8270 // we need to do an AND to maintain the clear top-part of the computation,
8271 // so we require that the input have eliminated at least one cast. If this
8272 // is a sign extension, we insert two new casts (to do the extension) so we
8273 // require that two casts have been eliminated.
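// For example (placeholders): in "zext i16 (add i16 (trunc i32 %a to i16), 7)
// to i32" the add can be redone in i32 and the trunc disappears, so one cast
// is removed and the zext case below fires even though an "and ..., 65535"
// may still be needed to keep the high bits clear.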
8274 bool DoXForm = false;
8275 bool JustReplace = false;
8276 switch (CI.getOpcode()) {
8278 // All the others use floating point so we shouldn't actually
8279 // get here because of the check above.
8280 llvm_unreachable("Unknown cast type");
8281 case Instruction::Trunc:
8284 case Instruction::ZExt: {
8285 DoXForm = NumCastsRemoved >= 1;
8287 if (!DoXForm && 0) {
8288 // If it's unnecessary to issue an AND to clear the high bits, it's
8289 // always profitable to do this xform.
8290 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, false);
8291 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8292 if (MaskedValueIsZero(TryRes, Mask))
8293 return ReplaceInstUsesWith(CI, TryRes);
8295 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8296 if (TryI->use_empty())
8297 EraseInstFromFunction(*TryI);
8301 case Instruction::SExt: {
8302 DoXForm = NumCastsRemoved >= 2;
8303 if (!DoXForm && !isa<TruncInst>(SrcI) && 0) {
8304 // If we do not have to emit the truncate + sext pair, then it's always
8305 // profitable to do this xform.
8307 // It's not safe to eliminate the trunc + sext pair if one of the
8308 // eliminated cast is a truncate. e.g.
8309 // t2 = trunc i32 t1 to i16
8310 // t3 = sext i16 t2 to i32
8313 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, true);
8314 unsigned NumSignBits = ComputeNumSignBits(TryRes);
8315 if (NumSignBits > (DestBitSize - SrcBitSize))
8316 return ReplaceInstUsesWith(CI, TryRes);
8318 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8319 if (TryI->use_empty())
8320 EraseInstFromFunction(*TryI);
8327 DEBUG(errs() << "ICE: EvaluateInDifferentType converting expression type"
8328 " to avoid cast: " << CI);
8329 Value *Res = EvaluateInDifferentType(SrcI, DestTy,
8330 CI.getOpcode() == Instruction::SExt);
8332 // Just replace this cast with the result.
8333 return ReplaceInstUsesWith(CI, Res);
8335 assert(Res->getType() == DestTy);
8336 switch (CI.getOpcode()) {
8337 default: llvm_unreachable("Unknown cast type!");
8338 case Instruction::Trunc:
8339 // Just replace this cast with the result.
8340 return ReplaceInstUsesWith(CI, Res);
8341 case Instruction::ZExt: {
8342 assert(SrcBitSize < DestBitSize && "Not a zext?");
8344 // If the high bits are already zero, just replace this cast with the
8345 // result.
8346 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8347 if (MaskedValueIsZero(Res, Mask))
8348 return ReplaceInstUsesWith(CI, Res);
8350 // We need to emit an AND to clear the high bits.
8351 Constant *C = ConstantInt::get(*Context,
8352 APInt::getLowBitsSet(DestBitSize, SrcBitSize));
8353 return BinaryOperator::CreateAnd(Res, C);
8355 case Instruction::SExt: {
8356 // If the high bits are already filled with sign bit, just replace this
8357 // cast with the result.
8358 unsigned NumSignBits = ComputeNumSignBits(Res);
8359 if (NumSignBits > (DestBitSize - SrcBitSize))
8360 return ReplaceInstUsesWith(CI, Res);
8362 // We need to emit a cast to truncate, then a cast to sext.
8363 return new SExtInst(Builder->CreateTrunc(Res, Src->getType()), DestTy);
8369 Value *Op0 = SrcI->getNumOperands() > 0 ? SrcI->getOperand(0) : 0;
8370 Value *Op1 = SrcI->getNumOperands() > 1 ? SrcI->getOperand(1) : 0;
8372 switch (SrcI->getOpcode()) {
8373 case Instruction::Add:
8374 case Instruction::Mul:
8375 case Instruction::And:
8376 case Instruction::Or:
8377 case Instruction::Xor:
8378 // If we are discarding information, rewrite.
8379 if (DestBitSize < SrcBitSize && DestBitSize != 1) {
8380 // Don't insert two casts unless at least one can be eliminated.
8381 if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) ||
8382 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
8383 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
8384 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
8385 return BinaryOperator::Create(
8386 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
8390 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
8391 if (isa<ZExtInst>(CI) && SrcBitSize == 1 &&
8392 SrcI->getOpcode() == Instruction::Xor &&
8393 Op1 == ConstantInt::getTrue(*Context) &&
8394 (!Op0->hasOneUse() || !isa<CmpInst>(Op0))) {
8395 Value *New = Builder->CreateZExt(Op0, DestTy, Op0->getName());
8396 return BinaryOperator::CreateXor(New,
8397 ConstantInt::get(CI.getType(), 1));
8401 case Instruction::Shl: {
8402 // Canonicalize trunc inside shl, if we can.
8403 ConstantInt *CI = dyn_cast<ConstantInt>(Op1);
8404 if (CI && DestBitSize < SrcBitSize &&
8405 CI->getLimitedValue(DestBitSize) < DestBitSize) {
8406 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
8407 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
8408 return BinaryOperator::CreateShl(Op0c, Op1c);
8416 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
8417 if (Instruction *Result = commonIntCastTransforms(CI))
8420 Value *Src = CI.getOperand(0);
8421 const Type *Ty = CI.getType();
8422 uint32_t DestBitWidth = Ty->getScalarSizeInBits();
8423 uint32_t SrcBitWidth = Src->getType()->getScalarSizeInBits();
8425 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
8426 if (DestBitWidth == 1) {
8427 Constant *One = ConstantInt::get(Src->getType(), 1);
8428 Src = Builder->CreateAnd(Src, One, "tmp");
8429 Value *Zero = Constant::getNullValue(Src->getType());
8430 return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
8433 // Optimize trunc(lshr(), c) to pull the shift through the truncate.
8434 ConstantInt *ShAmtV = 0;
8435 Value *ShiftOp = 0;
8436 if (Src->hasOneUse() &&
8437 match(Src, m_LShr(m_Value(ShiftOp), m_ConstantInt(ShAmtV)))) {
8438 uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth);
8440 // Get a mask for the bits shifting in.
8441 APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth));
8442 if (MaskedValueIsZero(ShiftOp, Mask)) {
8443 if (ShAmt >= DestBitWidth) // All zeros.
8444 return ReplaceInstUsesWith(CI, Constant::getNullValue(Ty));
8446 // Okay, we can shrink this. Truncate the input, then return a new
8447 // shift.
8448 Value *V1 = Builder->CreateTrunc(ShiftOp, Ty, ShiftOp->getName());
8449 Value *V2 = ConstantExpr::getTrunc(ShAmtV, Ty);
8450 return BinaryOperator::CreateLShr(V1, V2);
8457 /// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
8458 /// in order to eliminate the icmp.
8459 Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
8461 // If we are just checking for an icmp eq of a single bit and zext'ing it
8462 // to an integer, then shift the bit to the appropriate place and then
8463 // cast to integer to avoid the comparison.
8464 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
8465 const APInt &Op1CV = Op1C->getValue();
8467 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
8468 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
8469 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
8470 (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())) {
8471 if (!DoXform) return ICI;
8473 Value *In = ICI->getOperand(0);
8474 Value *Sh = ConstantInt::get(In->getType(),
8475 In->getType()->getScalarSizeInBits()-1);
8476 In = Builder->CreateLShr(In, Sh, In->getName()+".lobit");
8477 if (In->getType() != CI.getType())
8478 In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/, "tmp");
8480 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
8481 Constant *One = ConstantInt::get(In->getType(), 1);
8482 In = Builder->CreateXor(In, One, In->getName()+".not");
8485 return ReplaceInstUsesWith(CI, In);
8490 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
8491 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
8492 // zext (X == 1) to i32 --> X iff X has only the low bit set.
8493 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
8494 // zext (X != 0) to i32 --> X iff X has only the low bit set.
8495 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
8496 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
8497 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
8498 if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
8499 // This only works for EQ and NE
8500 ICI->isEquality()) {
8501 // If Op1C is some other power of two, convert:
8502 uint32_t BitWidth = Op1C->getType()->getBitWidth();
8503 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
8504 APInt TypeMask(APInt::getAllOnesValue(BitWidth));
8505 ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
8507 APInt KnownZeroMask(~KnownZero);
8508 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
8509 if (!DoXform) return ICI;
8511 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
8512 if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
8513 // (X&4) == 2 --> false
8514 // (X&4) != 2 --> true
8515 Constant *Res = ConstantInt::get(Type::getInt1Ty(*Context), isNE);
8516 Res = ConstantExpr::getZExt(Res, CI.getType());
8517 return ReplaceInstUsesWith(CI, Res);
8520 uint32_t ShiftAmt = KnownZeroMask.logBase2();
8521 Value *In = ICI->getOperand(0);
8523 // Perform a logical shr by shiftamt.
8524 // Insert the shift to put the result in the low bit.
8525 In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
8526 In->getName()+".lobit");
8529 if ((Op1CV != 0) == isNE) { // Toggle the low bit.
8530 Constant *One = ConstantInt::get(In->getType(), 1);
8531 In = Builder->CreateXor(In, One, "tmp");
8534 if (CI.getType() == In->getType())
8535 return ReplaceInstUsesWith(CI, In);
8537 return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
8542 // icmp ne A, B is equivalent to xor A, B when A and B only really have one bit.
8543 // It is also profitable to transform icmp eq into not(xor(A, B)) because that
8544 // may lead to additional simplifications.
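// For example, when %a and %b are each known to have at most their low bit
// set:
//   zext (icmp ne i32 %a, %b) to i32 --> xor %a, %b
//   zext (icmp eq i32 %a, %b) to i32 --> xor (xor %a, %b), 1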
8545 if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
8546 if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
8547 uint32_t BitWidth = ITy->getBitWidth();
8549 Value *LHS = ICI->getOperand(0);
8550 Value *RHS = ICI->getOperand(1);
8552 APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
8553 APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
8554 APInt TypeMask(APInt::getHighBitsSet(BitWidth, BitWidth-1));
8555 ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
8556 ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);
8558 if (KnownZeroLHS.countLeadingOnes() == BitWidth-1 &&
8559 KnownZeroRHS.countLeadingOnes() == BitWidth-1) {
8560 if (!DoXform) return ICI;
8562 Value *Xor = Builder->CreateXor(LHS, RHS);
8563 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
8564 Xor = Builder->CreateXor(Xor, ConstantInt::get(ITy, 1));
8566 return ReplaceInstUsesWith(CI, Xor);
8575 Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
8576 // If one of the common conversions will work, use it.
8577 if (Instruction *Result = commonIntCastTransforms(CI))
8580 Value *Src = CI.getOperand(0);
8582 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
8583 // types and if the sizes are just right we can convert this into a logical
8584 // 'and' which will be much cheaper than the pair of casts.
8585 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
8586 // Get the sizes of the types involved. We know that the intermediate type
8587 // will be smaller than A or C, but don't know the relation between A and C.
8588 Value *A = CSrc->getOperand(0);
8589 unsigned SrcSize = A->getType()->getScalarSizeInBits();
8590 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
8591 unsigned DstSize = CI.getType()->getScalarSizeInBits();
8592 // If we're actually extending zero bits, then if
8593 // SrcSize < DstSize: zext(a & mask)
8594 // SrcSize == DstSize: a & mask
8595 // SrcSize > DstSize: trunc(a) & mask
8596 if (SrcSize < DstSize) {
8597 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
8598 Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
8599 Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
8600 return new ZExtInst(And, CI.getType());
8603 if (SrcSize == DstSize) {
8604 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
8605 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
8608 if (SrcSize > DstSize) {
8609 Value *Trunc = Builder->CreateTrunc(A, CI.getType(), "tmp");
8610 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
8611 return BinaryOperator::CreateAnd(Trunc,
8612 ConstantInt::get(Trunc->getType(),
8617 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
8618 return transformZExtICmp(ICI, CI);
8620 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
8621 if (SrcI && SrcI->getOpcode() == Instruction::Or) {
8622 // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
8623 // of the (zext icmp) will be transformed.
8624 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
8625 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
8626 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
8627 (transformZExtICmp(LHS, CI, false) ||
8628 transformZExtICmp(RHS, CI, false))) {
8629 Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
8630 Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
8631 return BinaryOperator::Create(Instruction::Or, LCast, RCast);
8635 // zext(trunc(t) & C) -> (t & zext(C)).
8636 if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
8637 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
8638 if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
8639 Value *TI0 = TI->getOperand(0);
8640 if (TI0->getType() == CI.getType())
8642 BinaryOperator::CreateAnd(TI0,
8643 ConstantExpr::getZExt(C, CI.getType()));
8646 // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
8647 if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
8648 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
8649 if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
8650 if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
8651 And->getOperand(1) == C)
8652 if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
8653 Value *TI0 = TI->getOperand(0);
8654 if (TI0->getType() == CI.getType()) {
8655 Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
8656 Value *NewAnd = Builder->CreateAnd(TI0, ZC, "tmp");
8657 return BinaryOperator::CreateXor(NewAnd, ZC);
8664 Instruction *InstCombiner::visitSExt(SExtInst &CI) {
8665 if (Instruction *I = commonIntCastTransforms(CI))
8668 Value *Src = CI.getOperand(0);
8670 // Canonicalize sign-extend from i1 to a select.
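// i.e. "sext i1 %b to i32" becomes "select i1 %b, i32 -1, i32 0".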
8671 if (Src->getType() == Type::getInt1Ty(*Context))
8672 return SelectInst::Create(Src,
8673 Constant::getAllOnesValue(CI.getType()),
8674 Constant::getNullValue(CI.getType()));
8676 // See if the value being truncated is already sign extended. If so, just
8677 // eliminate the trunc/sext pair.
8678 if (Operator::getOpcode(Src) == Instruction::Trunc) {
8679 Value *Op = cast<User>(Src)->getOperand(0);
8680 unsigned OpBits = Op->getType()->getScalarSizeInBits();
8681 unsigned MidBits = Src->getType()->getScalarSizeInBits();
8682 unsigned DestBits = CI.getType()->getScalarSizeInBits();
8683 unsigned NumSignBits = ComputeNumSignBits(Op);
8685 if (OpBits == DestBits) {
8686 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
8687 // bits, it is already ready.
8688 if (NumSignBits > DestBits-MidBits)
8689 return ReplaceInstUsesWith(CI, Op);
8690 } else if (OpBits < DestBits) {
8691 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
8692 // bits, just sext from i32.
8693 if (NumSignBits > OpBits-MidBits)
8694 return new SExtInst(Op, CI.getType(), "tmp");
8696 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
8697 // bits, just truncate to i32.
8698 if (NumSignBits > OpBits-MidBits)
8699 return new TruncInst(Op, CI.getType(), "tmp");
8703 // If the input is a shl/ashr pair of a same constant, then this is a sign
8704 // extension from a smaller value. If we could trust arbitrary bitwidth
8705 // integers, we could turn this into a truncate to the smaller bit and then
8706 // use a sext for the whole extension. Since we don't, look deeper and check
8707 // for a truncate. If the source and dest are the same type, eliminate the
8708 // trunc and extend and just do shifts. For example, turn:
8709 // %a = trunc i32 %i to i8
8710 // %b = shl i8 %a, 6
8711 // %c = ashr i8 %b, 6
8712 // %d = sext i8 %c to i32
8714 // %a = shl i32 %i, 30
8715 // %d = ashr i32 %a, 30
8716 Value *A = 0;
8717 ConstantInt *BA = 0, *CA = 0;
8718 if (match(Src, m_AShr(m_Shl(m_Value(A), m_ConstantInt(BA)),
8719 m_ConstantInt(CA))) &&
8720 BA == CA && isa<TruncInst>(A)) {
8721 Value *I = cast<TruncInst>(A)->getOperand(0);
8722 if (I->getType() == CI.getType()) {
8723 unsigned MidSize = Src->getType()->getScalarSizeInBits();
8724 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
8725 unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
8726 Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
8727 I = Builder->CreateShl(I, ShAmtV, CI.getName());
8728 return BinaryOperator::CreateAShr(I, ShAmtV);
8735 /// FitsInFPType - Return a Constant* for the specified FP constant if it fits
8736 /// in the specified FP type without changing its value.
8737 static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
8738 LLVMContext *Context) {
8739 bool losesInfo;
8740 APFloat F = CFP->getValueAPF();
8741 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
8742 if (!losesInfo)
8743 return ConstantFP::get(*Context, F);
8747 /// LookThroughFPExtensions - If this is an fp extension instruction, look
8748 /// through it until we get the source value.
8749 static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
8750 if (Instruction *I = dyn_cast<Instruction>(V))
8751 if (I->getOpcode() == Instruction::FPExt)
8752 return LookThroughFPExtensions(I->getOperand(0), Context);
8754 // If this value is a constant, return the constant in the smallest FP type
8755 // that can accurately represent it. This allows us to turn
8756 // (float)((double)X+2.0) into x+2.0f.
8757 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
8758 if (CFP->getType() == Type::getPPC_FP128Ty(*Context))
8759 return V; // No constant folding of this.
8760 // See if the value can be truncated to float and then reextended.
8761 if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context))
8763 if (CFP->getType() == Type::getDoubleTy(*Context))
8764 return V; // Won't shrink.
8765 if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context))
8767 // Don't try to shrink to various long double types.
8773 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
8774 if (Instruction *I = commonCastTransforms(CI))
8777 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
8778 // smaller than the destination type, we can eliminate the truncate by doing
8779 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as
8780 // many builtins (sqrt, etc).
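// For illustration (placeholders):
//   %xe = fpext float %x to double
//   %ye = fpext float %y to double
//   %s = fadd double %xe, %ye
//   %t = fptrunc double %s to float
// can be evaluated as "fadd float %x, %y", dropping both fpexts and the
// fptrunc.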
8781 BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
8782 if (OpI && OpI->hasOneUse()) {
8783 switch (OpI->getOpcode()) {
8785 case Instruction::FAdd:
8786 case Instruction::FSub:
8787 case Instruction::FMul:
8788 case Instruction::FDiv:
8789 case Instruction::FRem:
8790 const Type *SrcTy = OpI->getType();
8791 Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0), Context);
8792 Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1), Context);
8793 if (LHSTrunc->getType() != SrcTy &&
8794 RHSTrunc->getType() != SrcTy) {
8795 unsigned DstSize = CI.getType()->getScalarSizeInBits();
8796 // If the source types were both smaller than the destination type of
8797 // the cast, do this xform.
8798 if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
8799 RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
8800 LHSTrunc = Builder->CreateFPExt(LHSTrunc, CI.getType());
8801 RHSTrunc = Builder->CreateFPExt(RHSTrunc, CI.getType());
8802 return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
8811 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
8812 return commonCastTransforms(CI);
8815 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
8816 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
8817 if (OpI == 0)
8818 return commonCastTransforms(FI);
8820 // fptoui(uitofp(X)) --> X
8821 // fptoui(sitofp(X)) --> X
8822 // This is safe if the intermediate type has enough bits in its mantissa to
8823 // accurately represent all values of X. For example, do not do this with
8824 // i64->float->i64. This is also safe for the sitofp case, because any negative
8825 // 'X' value would cause an undefined result for the fptoui.
8826 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
8827 OpI->getOperand(0)->getType() == FI.getType() &&
8828 (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
8829 OpI->getType()->getFPMantissaWidth())
8830 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
8832 return commonCastTransforms(FI);
8835 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
8836 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
8837 if (OpI == 0)
8838 return commonCastTransforms(FI);
8840 // fptosi(sitofp(X)) --> X
8841 // fptosi(uitofp(X)) --> X
8842 // This is safe if the intermediate type has enough bits in its mantissa to
8843 // accurately represent all values of X. For example, do not do this with
8844 // i64->float->i64. This is also safe for the uitofp case, because an 'X'
8845 // value too large for the signed result would make the fptosi undefined anyway.
8846 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
8847 OpI->getOperand(0)->getType() == FI.getType() &&
8848 (int)FI.getType()->getScalarSizeInBits() <=
8849 OpI->getType()->getFPMantissaWidth())
8850 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
8852 return commonCastTransforms(FI);
8855 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
8856 return commonCastTransforms(CI);
8859 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
8860 return commonCastTransforms(CI);
8863 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
8864 // If the destination integer type is smaller than the intptr_t type for
8865 // this target, do a ptrtoint to intptr_t then do a trunc. This allows the
8866 // trunc to be exposed to other transforms. Don't do this for extending
8867 // ptrtoint's, because we don't know if the target sign or zero extends its
8868 // pointers.
8869 if (TD &&
8870 CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
8871 Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
8872 TD->getIntPtrType(CI.getContext()),
8874 return new TruncInst(P, CI.getType());
8877 return commonPointerCastTransforms(CI);
8880 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
8881 // If the source integer type is larger than the intptr_t type for
8882 // this target, do a trunc to the intptr_t type, then inttoptr of it. This
8883 // allows the trunc to be exposed to other transforms. Don't do this for
8884 // extending inttoptr's, because we don't know if the target sign or zero
8885 // extends to pointers.
8886 if (TD && CI.getOperand(0)->getType()->getScalarSizeInBits() >
8887 TD->getPointerSizeInBits()) {
8888 Value *P = Builder->CreateTrunc(CI.getOperand(0),
8889 TD->getIntPtrType(CI.getContext()), "tmp");
8890 return new IntToPtrInst(P, CI.getType());
8893 if (Instruction *I = commonCastTransforms(CI))
8899 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
8900 // If the operands are integer typed then apply the integer transforms,
8901 // otherwise just apply the common ones.
8902 Value *Src = CI.getOperand(0);
8903 const Type *SrcTy = Src->getType();
8904 const Type *DestTy = CI.getType();
8906 if (isa<PointerType>(SrcTy)) {
8907 if (Instruction *I = commonPointerCastTransforms(CI))
8910 if (Instruction *Result = commonCastTransforms(CI))
8915 // Get rid of casts from one type to the same type. These are useless and can
8916 // be replaced by the operand.
8917 if (DestTy == Src->getType())
8918 return ReplaceInstUsesWith(CI, Src);
8920 if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
8921 const PointerType *SrcPTy = cast<PointerType>(SrcTy);
8922 const Type *DstElTy = DstPTy->getElementType();
8923 const Type *SrcElTy = SrcPTy->getElementType();
8925 // If the address spaces don't match, don't eliminate the bitcast, which is
8926 // required for changing types.
8927 if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
8930 // If we are casting an alloca to a pointer to a type of the same
8931 // size, rewrite the allocation instruction to allocate the "right" type.
8932 // There is no need to modify malloc calls because it is their bitcast that
8933 // needs to be cleaned up.
8934 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
8935 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
8938 // If the source and destination are pointers, and this cast is equivalent
8939 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
8940 // This can enhance SROA and other transforms that want type-safe pointers.
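// For example, "bitcast [4 x float]* %p to float*" becomes
// "getelementptr inbounds [4 x float]* %p, i32 0, i32 0".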
8941 Constant *ZeroUInt = Constant::getNullValue(Type::getInt32Ty(*Context));
8942 unsigned NumZeros = 0;
8943 while (SrcElTy != DstElTy &&
8944 isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
8945 SrcElTy->getNumContainedTypes() /* not "{}" */) {
8946 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
8950 // If we found a path from the src to dest, create the getelementptr now.
8951 if (SrcElTy == DstElTy) {
8952 SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
8953 return GetElementPtrInst::CreateInBounds(Src, Idxs.begin(), Idxs.end(), "",
8954 ((Instruction*) NULL));
8958 if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
8959 if (DestVTy->getNumElements() == 1) {
8960 if (!isa<VectorType>(SrcTy)) {
8961 Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
8962 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
8963 Constant::getNullValue(Type::getInt32Ty(*Context)));
8965 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
8969 if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
8970 if (SrcVTy->getNumElements() == 1) {
8971 if (!isa<VectorType>(DestTy)) {
8972 Value *Elem =
8973 Builder->CreateExtractElement(Src,
8974 Constant::getNullValue(Type::getInt32Ty(*Context)));
8975 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
8980 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
8981 if (SVI->hasOneUse()) {
8982 // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
8983 // a bitconvert to a vector with the same # elts.
8984 if (isa<VectorType>(DestTy) &&
8985 cast<VectorType>(DestTy)->getNumElements() ==
8986 SVI->getType()->getNumElements() &&
8987 SVI->getType()->getNumElements() ==
8988 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements()) {
8989 CastInst *Tmp;
8990 // If either of the operands is a cast from CI.getType(), then
8991 // evaluating the shuffle in the casted destination's type will allow
8992 // us to eliminate at least one cast.
8993 if (((Tmp = dyn_cast<CastInst>(SVI->getOperand(0))) &&
8994 Tmp->getOperand(0)->getType() == DestTy) ||
8995 ((Tmp = dyn_cast<CastInst>(SVI->getOperand(1))) &&
8996 Tmp->getOperand(0)->getType() == DestTy)) {
8997 Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
8998 Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
8999 // Return a new shuffle vector. Use the same element ID's, as we
9000 // know the vector types match #elts.
9001 return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
9009 /// GetSelectFoldableOperands - We want to turn code that looks like this:
9010 /// %C = or %A, %B
9011 /// %D = select %cond, %C, %A
9012 /// into:
9013 /// %C = select %cond, %B, 0
9014 /// %D = or %A, %C
9016 /// Assuming that the specified instruction is an operand to the select, return
9017 /// a bitmask indicating which operands of this instruction are foldable if they
9018 /// equal the other incoming value of the select.
9020 static unsigned GetSelectFoldableOperands(Instruction *I) {
9021 switch (I->getOpcode()) {
9022 case Instruction::Add:
9023 case Instruction::Mul:
9024 case Instruction::And:
9025 case Instruction::Or:
9026 case Instruction::Xor:
9027 return 3; // Can fold through either operand.
9028 case Instruction::Sub: // Can only fold on the amount subtracted.
9029 case Instruction::Shl: // Can only fold on the shift amount.
9030 case Instruction::LShr:
9031 case Instruction::AShr:
9032 return 1;
9033 default:
9034 return 0; // Cannot fold
9038 /// GetSelectFoldableConstant - For the same transformation as the previous
9039 /// function, return the identity constant that goes into the select.
9040 static Constant *GetSelectFoldableConstant(Instruction *I,
9041 LLVMContext *Context) {
9042 switch (I->getOpcode()) {
9043 default: llvm_unreachable("This cannot happen!");
9044 case Instruction::Add:
9045 case Instruction::Sub:
9046 case Instruction::Or:
9047 case Instruction::Xor:
9048 case Instruction::Shl:
9049 case Instruction::LShr:
9050 case Instruction::AShr:
9051 return Constant::getNullValue(I->getType());
9052 case Instruction::And:
9053 return Constant::getAllOnesValue(I->getType());
9054 case Instruction::Mul:
9055 return ConstantInt::get(I->getType(), 1);
9059 /// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
9060 /// have the same opcode and only one use each. Try to simplify this.
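/// For illustration (placeholders): with %t = add i32 %x, %y and
/// %f = add i32 %x, %z, "select i1 %c, i32 %t, i32 %f" becomes
/// "add i32 %x, (select i1 %c, i32 %y, i32 %z)"; the same idea applies to a
/// pair of casts from the same source type.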
9061 Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
9063 if (TI->getNumOperands() == 1) {
9064 // If this is a non-volatile load or a cast from the same type,
9067 if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
9070 return 0; // unknown unary op.
9073 // Fold this by inserting a select from the input values.
9074 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
9075 FI->getOperand(0), SI.getName()+".v");
9076 InsertNewInstBefore(NewSI, SI);
9077 return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
9081 // Only handle binary operators here.
9082 if (!isa<BinaryOperator>(TI))
9085 // Figure out if the operations have any operands in common.
9086 Value *MatchOp, *OtherOpT, *OtherOpF;
9088 if (TI->getOperand(0) == FI->getOperand(0)) {
9089 MatchOp = TI->getOperand(0);
9090 OtherOpT = TI->getOperand(1);
9091 OtherOpF = FI->getOperand(1);
9092 MatchIsOpZero = true;
9093 } else if (TI->getOperand(1) == FI->getOperand(1)) {
9094 MatchOp = TI->getOperand(1);
9095 OtherOpT = TI->getOperand(0);
9096 OtherOpF = FI->getOperand(0);
9097 MatchIsOpZero = false;
9098 } else if (!TI->isCommutative()) {
9100 } else if (TI->getOperand(0) == FI->getOperand(1)) {
9101 MatchOp = TI->getOperand(0);
9102 OtherOpT = TI->getOperand(1);
9103 OtherOpF = FI->getOperand(0);
9104 MatchIsOpZero = true;
9105 } else if (TI->getOperand(1) == FI->getOperand(0)) {
9106 MatchOp = TI->getOperand(1);
9107 OtherOpT = TI->getOperand(0);
9108 OtherOpF = FI->getOperand(1);
9109 MatchIsOpZero = true;
9114 // If we reach here, they do have operations in common.
9115 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), OtherOpT,
9116 OtherOpF, SI.getName()+".v");
9117 InsertNewInstBefore(NewSI, SI);
9119 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TI)) {
9121 return BinaryOperator::Create(BO->getOpcode(), MatchOp, NewSI);
9123 return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp);
9125 llvm_unreachable("Shouldn't get here");
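/// isSelect01 - Return true if both constants are 0 or 1; FoldSelectIntoOp
/// below only creates a select between two constants when they are this
/// cheap.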
9129 static bool isSelect01(Constant *C1, Constant *C2) {
9130 ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
9133 ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
9136 return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
9139 /// FoldSelectIntoOp - Try to fold the select into one of the operands to
9140 /// facilitate further optimization.
9141 Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
9143 // See the comment above GetSelectFoldableOperands for a description of the
9144 // transformation we are doing here.
9145 if (Instruction *TVI = dyn_cast<Instruction>(TrueVal)) {
9146 if (TVI->hasOneUse() && TVI->getNumOperands() == 2 &&
9147 !isa<Constant>(FalseVal)) {
9148 if (unsigned SFO = GetSelectFoldableOperands(TVI)) {
9149 unsigned OpToFold = 0;
9150 if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
9152 } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
9157 Constant *C = GetSelectFoldableConstant(TVI, Context);
9158 Value *OOp = TVI->getOperand(2-OpToFold);
9159 // Avoid creating select between 2 constants unless it's selecting
9160 // between 0 and 1 (see isSelect01).
9161 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9162 Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C);
9163 InsertNewInstBefore(NewSel, SI);
9164 NewSel->takeName(TVI);
9165 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
9166 return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel);
9167 llvm_unreachable("Unknown instruction!!");
9174 if (Instruction *FVI = dyn_cast<Instruction>(FalseVal)) {
9175 if (FVI->hasOneUse() && FVI->getNumOperands() == 2 &&
9176 !isa<Constant>(TrueVal)) {
9177 if (unsigned SFO = GetSelectFoldableOperands(FVI)) {
9178 unsigned OpToFold = 0;
9179 if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
9181 } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
9186 Constant *C = GetSelectFoldableConstant(FVI, Context);
9187 Value *OOp = FVI->getOperand(2-OpToFold);
9188 // Avoid creating select between 2 constants unless it's selecting
9189 // between 0 and 1 (see isSelect01).
9190 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9191 Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp);
9192 InsertNewInstBefore(NewSel, SI);
9193 NewSel->takeName(FVI);
9194 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
9195 return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel);
9196 llvm_unreachable("Unknown instruction!!");
9206 /// visitSelectInstWithICmp - Visit a SelectInst that has an
9207 /// ICmpInst as its first operand.
9209 Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
9211 bool Changed = false;
9212 ICmpInst::Predicate Pred = ICI->getPredicate();
9213 Value *CmpLHS = ICI->getOperand(0);
9214 Value *CmpRHS = ICI->getOperand(1);
9215 Value *TrueVal = SI.getTrueValue();
9216 Value *FalseVal = SI.getFalseValue();
9218 // Check cases where the comparison is with a constant that
9219 // can be adjusted to fit the min/max idiom. We may edit ICI in
9220 // place here, so make sure the select is the only user.
9221 if (ICI->hasOneUse())
9222 if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
9225 case ICmpInst::ICMP_ULT:
9226 case ICmpInst::ICMP_SLT: {
9227 // X < MIN ? T : F --> F
9228 if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
9229 return ReplaceInstUsesWith(SI, FalseVal);
9230 // X < C ? X : C-1 --> X > C-1 ? C-1 : X
9231 Constant *AdjustedRHS = SubOne(CI);
9232 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9233 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9234 Pred = ICmpInst::getSwappedPredicate(Pred);
9235 CmpRHS = AdjustedRHS;
9236 std::swap(FalseVal, TrueVal);
9237 ICI->setPredicate(Pred);
9238 ICI->setOperand(1, CmpRHS);
9239 SI.setOperand(1, TrueVal);
9240 SI.setOperand(2, FalseVal);
9245 case ICmpInst::ICMP_UGT:
9246 case ICmpInst::ICMP_SGT: {
9247 // X > MAX ? T : F --> F
9248 if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
9249 return ReplaceInstUsesWith(SI, FalseVal);
9250 // X > C ? X : C+1 --> X < C+1 ? C+1 : X
9251 Constant *AdjustedRHS = AddOne(CI);
9252 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9253 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9254 Pred = ICmpInst::getSwappedPredicate(Pred);
9255 CmpRHS = AdjustedRHS;
9256 std::swap(FalseVal, TrueVal);
9257 ICI->setPredicate(Pred);
9258 ICI->setOperand(1, CmpRHS);
9259 SI.setOperand(1, TrueVal);
9260 SI.setOperand(2, FalseVal);
9267 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed
9268 // (x >s -1) ? -1 : 0 -> ashr x, 31 -> all ones if not signed
9269 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
9270 if (match(TrueVal, m_ConstantInt<-1>()) &&
9271 match(FalseVal, m_ConstantInt<0>()))
9272 Pred = ICI->getPredicate();
9273 else if (match(TrueVal, m_ConstantInt<0>()) &&
9274 match(FalseVal, m_ConstantInt<-1>()))
9275 Pred = CmpInst::getInversePredicate(ICI->getPredicate());
9277 if (Pred != CmpInst::BAD_ICMP_PREDICATE) {
9278 // If we are just checking for an icmp eq of a single bit and zext'ing it
9279 // to an integer, then shift the bit to the appropriate place and then
9280 // cast to integer to avoid the comparison.
9281 const APInt &Op1CV = CI->getValue();
9283 // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
9284 // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
9285 if ((Pred == ICmpInst::ICMP_SLT && Op1CV == 0) ||
9286 (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
9287 Value *In = ICI->getOperand(0);
9288 Value *Sh = ConstantInt::get(In->getType(),
9289 In->getType()->getScalarSizeInBits()-1);
9290 In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh,
9291 In->getName()+".lobit"),
9293 if (In->getType() != SI.getType())
9294 In = CastInst::CreateIntegerCast(In, SI.getType(),
9295 true/*SExt*/, "tmp", ICI);
9297 if (Pred == ICmpInst::ICMP_SGT)
9298 In = InsertNewInstBefore(BinaryOperator::CreateNot(In,
9299 In->getName()+".not"), *ICI);
9301 return ReplaceInstUsesWith(SI, In);
9306 if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
9307 // Transform (X == Y) ? X : Y -> Y
9308 if (Pred == ICmpInst::ICMP_EQ)
9309 return ReplaceInstUsesWith(SI, FalseVal);
9310 // Transform (X != Y) ? X : Y -> X
9311 if (Pred == ICmpInst::ICMP_NE)
9312 return ReplaceInstUsesWith(SI, TrueVal);
9313 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9315 } else if (CmpLHS == FalseVal && CmpRHS == TrueVal) {
9316 // Transform (X == Y) ? Y : X -> X
9317 if (Pred == ICmpInst::ICMP_EQ)
9318 return ReplaceInstUsesWith(SI, FalseVal);
9319 // Transform (X != Y) ? Y : X -> Y
9320 if (Pred == ICmpInst::ICMP_NE)
9321 return ReplaceInstUsesWith(SI, TrueVal);
9322 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9325 /// NOTE: if we wanted to, this is where to detect integer ABS
9327 return Changed ? &SI : 0;
9331 /// CanSelectOperandBeMappingIntoPredBlock - SI is a select whose condition is a
9332 /// PHI node (but the two may be in different blocks). See if the true/false
9333 /// values (V) are live in all of the predecessor blocks of the PHI. For
9334 /// example, cases like this cannot be mapped:
9336 /// X = phi [ C1, BB1], [C2, BB2]
9338 /// Z = select X, Y, 0
9340 /// because Y is not live in BB1/BB2.
9342 static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V,
9343 const SelectInst &SI) {
9344 // If the value is a non-instruction value like a constant or argument, it
9345 // can always be mapped.
9346 const Instruction *I = dyn_cast<Instruction>(V);
9347 if (I == 0) return true;
9349 // If V is a PHI node defined in the same block as the condition PHI, we can
9350 // map the arguments.
9351 const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
9353 if (const PHINode *VP = dyn_cast<PHINode>(I))
9354 if (VP->getParent() == CondPHI->getParent())
9357 // Otherwise, if the PHI and select are defined in the same block and if V is
9358 // defined in a different block, then we can transform it.
9359 if (SI.getParent() == CondPHI->getParent() &&
9360 I->getParent() != CondPHI->getParent())
9363 // Otherwise we have a 'hard' case and we can't tell without doing more
9364 // detailed dominator based analysis, punt.
9368 Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
9369 Value *CondVal = SI.getCondition();
9370 Value *TrueVal = SI.getTrueValue();
9371 Value *FalseVal = SI.getFalseValue();
9373 // select true, X, Y -> X
9374 // select false, X, Y -> Y
9375 if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
9376 return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);
9378 // select C, X, X -> X
9379 if (TrueVal == FalseVal)
9380 return ReplaceInstUsesWith(SI, TrueVal);
9382 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
9383 return ReplaceInstUsesWith(SI, FalseVal);
9384 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
9385 return ReplaceInstUsesWith(SI, TrueVal);
9386 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
9387 if (isa<Constant>(TrueVal))
9388 return ReplaceInstUsesWith(SI, TrueVal);
9390 return ReplaceInstUsesWith(SI, FalseVal);
9393 if (SI.getType() == Type::getInt1Ty(*Context)) {
9394 if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
9395 if (C->getZExtValue()) {
9396 // Change: A = select B, true, C --> A = or B, C
9397 return BinaryOperator::CreateOr(CondVal, FalseVal);
9399 // Change: A = select B, false, C --> A = and !B, C
9400 Value *NotCond =
9401 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9402 "not."+CondVal->getName()), SI);
9403 return BinaryOperator::CreateAnd(NotCond, FalseVal);
9405 } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
9406 if (C->getZExtValue() == false) {
9407 // Change: A = select B, C, false --> A = and B, C
9408 return BinaryOperator::CreateAnd(CondVal, TrueVal);
9410 // Change: A = select B, C, true --> A = or !B, C
9411 Value *NotCond =
9412 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9413 "not."+CondVal->getName()), SI);
9414 return BinaryOperator::CreateOr(NotCond, TrueVal);
9418 // select a, b, a -> a&b
9419 // select a, a, b -> a|b
9420 if (CondVal == TrueVal)
9421 return BinaryOperator::CreateOr(CondVal, FalseVal);
9422 else if (CondVal == FalseVal)
9423 return BinaryOperator::CreateAnd(CondVal, TrueVal);
9426 // Selecting between two integer constants?
9427 if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
9428 if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
9429 // select C, 1, 0 -> zext C to int
9430 if (FalseValC->isZero() && TrueValC->getValue() == 1) {
9431 return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
9432 } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
9433 // select C, 0, 1 -> zext !C to int
9434 Value *NotCond =
9435 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9436 "not."+CondVal->getName()), SI);
9437 return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
9440 if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
9441 // If one of the constants is zero (we know they can't both be) and we
9442 // have an icmp instruction with zero, and we have an 'and' with the
9443 // non-constant value, eliminate this whole mess. This corresponds to
9444 // cases like this: ((X & 27) ? 27 : 0)
9445 if (TrueValC->isZero() || FalseValC->isZero())
9446 if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
9447 cast<Constant>(IC->getOperand(1))->isNullValue())
9448 if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
9449 if (ICA->getOpcode() == Instruction::And &&
9450 isa<ConstantInt>(ICA->getOperand(1)) &&
9451 (ICA->getOperand(1) == TrueValC ||
9452 ICA->getOperand(1) == FalseValC) &&
9453 isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
9454 // Okay, now we know that everything is set up, we just don't
9455 // know whether we have a icmp_ne or icmp_eq and whether the
9456 // true or false val is the zero.
9457 bool ShouldNotVal = !TrueValC->isZero();
9458 ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
9459 Value *V = ICA;
9460 if (ShouldNotVal)
9461 V = InsertNewInstBefore(BinaryOperator::Create(
9462 Instruction::Xor, V, ICA->getOperand(1)), SI);
9463 return ReplaceInstUsesWith(SI, V);
9468 // See if we are selecting two values based on a comparison of the two values.
9469 if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
9470 if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
9471 // Transform (X == Y) ? X : Y -> Y
9472 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
9473 // This is not safe in general for floating point:
9474 // consider X== -0, Y== +0.
9475 // It becomes safe if either operand is a nonzero constant.
9476 ConstantFP *CFPt, *CFPf;
9477 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
9478 !CFPt->getValueAPF().isZero()) ||
9479 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
9480 !CFPf->getValueAPF().isZero()))
9481 return ReplaceInstUsesWith(SI, FalseVal);
9483 // Transform (X != Y) ? X : Y -> X
9484 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
9485 return ReplaceInstUsesWith(SI, TrueVal);
9486 // NOTE: if we wanted to, this is where to detect MIN/MAX
9488 } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
9489 // Transform (X == Y) ? Y : X -> X
9490 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
9491 // This is not safe in general for floating point:
9492 // consider X== -0, Y== +0.
9493 // It becomes safe if either operand is a nonzero constant.
9494 ConstantFP *CFPt, *CFPf;
9495 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
9496 !CFPt->getValueAPF().isZero()) ||
9497 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
9498 !CFPf->getValueAPF().isZero()))
9499 return ReplaceInstUsesWith(SI, FalseVal);
9501 // Transform (X != Y) ? Y : X -> Y
9502 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
9503 return ReplaceInstUsesWith(SI, TrueVal);
9504 // NOTE: if we wanted to, this is where to detect MIN/MAX
9506 // NOTE: if we wanted to, this is where to detect ABS
9509 // See if we are selecting two values based on a comparison of the two values.
9510 if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
9511 if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
9514 if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
9515 if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
9516 if (TI->hasOneUse() && FI->hasOneUse()) {
9517 Instruction *AddOp = 0, *SubOp = 0;
9519 // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
9520 if (TI->getOpcode() == FI->getOpcode())
9521 if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
9524 // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is
9525 // even legal for FP.
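// Illustrative example (added for clarity; not in the original source):
//   select i1 %c, i32 (add %X, %Y), i32 (sub %X, %Y)
// becomes
//   %n = sub i32 0, %Y
//   %s = select i1 %c, i32 %Y, i32 %n
//   add i32 %X, %s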
9526 if ((TI->getOpcode() == Instruction::Sub &&
9527 FI->getOpcode() == Instruction::Add) ||
9528 (TI->getOpcode() == Instruction::FSub &&
9529 FI->getOpcode() == Instruction::FAdd)) {
9530 AddOp = FI; SubOp = TI;
9531 } else if ((FI->getOpcode() == Instruction::Sub &&
9532 TI->getOpcode() == Instruction::Add) ||
9533 (FI->getOpcode() == Instruction::FSub &&
9534 TI->getOpcode() == Instruction::FAdd)) {
9535 AddOp = TI; SubOp = FI;
9539 Value *OtherAddOp = 0;
9540 if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
9541 OtherAddOp = AddOp->getOperand(1);
9542 } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
9543 OtherAddOp = AddOp->getOperand(0);
9547 // So at this point we know we have (Y -> OtherAddOp):
9548 // select C, (add X, Y), (sub X, Z)
9549 Value *NegVal; // Compute -Z
9550 if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
9551 NegVal = ConstantExpr::getNeg(C);
9553 NegVal = InsertNewInstBefore(
9554 BinaryOperator::CreateNeg(SubOp->getOperand(1),
9558 Value *NewTrueOp = OtherAddOp;
9559 Value *NewFalseOp = NegVal;
9561 std::swap(NewTrueOp, NewFalseOp);
9562 Instruction *NewSel =
9563 SelectInst::Create(CondVal, NewTrueOp,
9564 NewFalseOp, SI.getName() + ".p");
9566 NewSel = InsertNewInstBefore(NewSel, SI);
9567 return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
9572 // See if we can fold the select into one of our operands.
9573 if (SI.getType()->isInteger()) {
9574 Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal);
9579 // See if we can fold the select into a phi node if the condition is a select.
9580 if (isa<PHINode>(SI.getCondition()))
9581 // The true/false values have to be live in the PHI's predecessor blocks.
9582 if (CanSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
9583 CanSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
9584 if (Instruction *NV = FoldOpIntoPhi(SI))
9587 if (BinaryOperator::isNot(CondVal)) {
9588 SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
9589 SI.setOperand(1, FalseVal);
9590 SI.setOperand(2, TrueVal);
9597 /// EnforceKnownAlignment - If the specified pointer points to an object that
9598 /// we control, modify the object's alignment to PrefAlign. This isn't
9599 /// often possible though. If alignment is important, a more reliable approach
9600 /// is to simply align all global variables and allocation instructions to
9601 /// their preferred alignment from the beginning.
9603 static unsigned EnforceKnownAlignment(Value *V,
9604 unsigned Align, unsigned PrefAlign) {
9606 User *U = dyn_cast<User>(V);
9607 if (!U) return Align;
9609 switch (Operator::getOpcode(U)) {
9611 case Instruction::BitCast:
9612 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
9613 case Instruction::GetElementPtr: {
9614 // If all indexes are zero, it is just the alignment of the base pointer.
9615 bool AllZeroOperands = true;
9616 for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
9617 if (!isa<Constant>(*i) ||
9618 !cast<Constant>(*i)->isNullValue()) {
9619 AllZeroOperands = false;
9623 if (AllZeroOperands) {
9624 // Treat this like a bitcast.
9625 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
9631 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
9632 // If there is a large requested alignment and we can, bump up the alignment of the global.
9634 if (!GV->isDeclaration()) {
9635 if (GV->getAlignment() >= PrefAlign)
9636 Align = GV->getAlignment();
9638 GV->setAlignment(PrefAlign);
9642 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
9643 // If there is a requested alignment and if this is an alloca, round up.
9644 if (AI->getAlignment() >= PrefAlign)
9645 Align = AI->getAlignment();
9647 AI->setAlignment(PrefAlign);
9655 /// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
9656 /// we can determine, return it, otherwise return 0. If PrefAlign is specified,
9657 /// and it is more than the alignment of the ultimate object, see if we can
9658 /// increase the alignment of the ultimate object, making this check succeed.
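// Illustrative note (added for clarity; not in the original source): if
// ComputeMaskedBits proves the low 4 bits of the pointer are zero, TrailZ is
// at least 4 and the alignment computed below is at least 1 << 4 = 16 bytes.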
9659 unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
9660 unsigned PrefAlign) {
9661 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
9662 sizeof(PrefAlign) * CHAR_BIT;
9663 APInt Mask = APInt::getAllOnesValue(BitWidth);
9664 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
9665 ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
9666 unsigned TrailZ = KnownZero.countTrailingOnes();
9667 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
9669 if (PrefAlign > Align)
9670 Align = EnforceKnownAlignment(V, Align, PrefAlign);
9672 // We don't need to make any adjustment.
9676 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
9677 unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
9678 unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
9679 unsigned MinAlign = std::min(DstAlign, SrcAlign);
9680 unsigned CopyAlign = MI->getAlignment();
9682 if (CopyAlign < MinAlign) {
9683 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
9688 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a load/store.
9690 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
9691 if (MemOpLength == 0) return 0;
9693 // Source and destination pointer types are always "i8*" for the intrinsic. See
9694 // if the size is something we can handle with a single primitive load/store.
9695 // A single load+store correctly handles overlapping memory in the memmove case.
9697 unsigned Size = MemOpLength->getZExtValue();
9698 if (Size == 0) return MI; // Delete this mem transfer.
9700 if (Size > 8 || (Size&(Size-1)))
9701 return 0; // If not 1/2/4/8 bytes, exit.
9703 // Use an integer load+store unless we can find something better.
9705 PointerType::getUnqual(IntegerType::get(*Context, Size<<3));
9707 // Memcpy forces the use of i8* for the source and destination. That means
9708 // that if you're using memcpy to move one double around, you'll get a cast
9709 // from double* to i8*. We'd much rather use a double load+store than an i64
9710 // load+store here, because this improves the odds that the source or
9711 // dest address will be promotable. See if we can find a better type than the
9712 // integer datatype.
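// Illustrative example (added for clarity; not in the original source): for
//   call void @llvm.memcpy(i8* %dst, i8* %src, i64 8, i32 8)
// where %src was bitcast from a double*, we prefer
//   %v = load double* %src  ;  store double %v, double* %dst
// over an i64 load/store, since the double is more likely to stay in a register.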
9713 if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
9714 const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
9715 if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
9716 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
9717 // down through these levels if so.
9718 while (!SrcETy->isSingleValueType()) {
9719 if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
9720 if (STy->getNumElements() == 1)
9721 SrcETy = STy->getElementType(0);
9724 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
9725 if (ATy->getNumElements() == 1)
9726 SrcETy = ATy->getElementType();
9733 if (SrcETy->isSingleValueType())
9734 NewPtrTy = PointerType::getUnqual(SrcETy);
9739 // If the memcpy/memmove provides better alignment info than we can analyze, use it.
9741 SrcAlign = std::max(SrcAlign, CopyAlign);
9742 DstAlign = std::max(DstAlign, CopyAlign);
9744 Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
9745 Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
9746 Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
9747 InsertNewInstBefore(L, *MI);
9748 InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
9750 // Set the size of the copy to 0; it will be deleted on the next iteration.
9751 MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
9755 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
9756 unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
9757 if (MI->getAlignment() < Alignment) {
9758 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
9763 // Extract the length and alignment and fill if they are constant.
9764 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
9765 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
9766 if (!LenC || !FillC || FillC->getType() != Type::getInt8Ty(*Context))
9768 uint64_t Len = LenC->getZExtValue();
9769 Alignment = MI->getAlignment();
9771 // If the length is zero, this is a no-op
9772 if (Len == 0) return MI; // memset(d,c,0,a) -> noop
9774 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
9775 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
9776 const Type *ITy = IntegerType::get(*Context, Len*8); // n=1 -> i8.
9778 Value *Dest = MI->getDest();
9779 Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
9781 // Alignment 0 is equivalent to alignment 1 for memset, but not for store.
9782 if (Alignment == 0) Alignment = 1;
9784 // Extract the fill value and store.
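// Note (added for clarity; not in the original source): multiplying the fill
// byte by 0x0101010101010101 splats it across all eight bytes, e.g. 0xAB
// becomes 0xABABABABABABABAB; only the low Len*8 bits are used below.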
9785 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
9786 InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
9787 Dest, false, Alignment), *MI);
9789 // Set the length of the memset to 0; it will be deleted on the next iteration.
9790 MI->setLength(Constant::getNullValue(LenC->getType()));
9798 /// visitCallInst - CallInst simplification. This mostly only handles folding
9799 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
9800 /// the heavy lifting.
9802 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
9803 if (isFreeCall(&CI))
9804 return visitFree(CI);
9806 // If the caller function is nounwind, mark the call as nounwind, even if the call itself isn't.
9808 if (CI.getParent()->getParent()->doesNotThrow() &&
9809 !CI.doesNotThrow()) {
9810 CI.setDoesNotThrow();
9814 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
9815 if (!II) return visitCallSite(&CI);
9817 // Intrinsics cannot occur in an invoke, so handle them here instead of in visitCallSite.
9819 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
9820 bool Changed = false;
9822 // memmove/cpy/set of zero bytes is a noop.
9823 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
9824 if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
9826 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
9827 if (CI->getZExtValue() == 1) {
9828 // Replace the instruction with just byte operations. We would
9829 // transform other cases to loads/stores, but we don't know if
9830 // alignment is sufficient.
9834 // If we have a memmove and the source operation is a constant global,
9835 // then the source and dest pointers can't alias, so we can change this
9836 // into a call to memcpy.
9837 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
9838 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
9839 if (GVSrc->isConstant()) {
9840 Module *M = CI.getParent()->getParent()->getParent();
9841 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
9843 Tys[0] = CI.getOperand(3)->getType();
9845 Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
9849 // memmove(x,x,size) -> noop.
9850 if (MMI->getSource() == MMI->getDest())
9851 return EraseInstFromFunction(CI);
9854 // If we can determine a pointer alignment that is bigger than currently
9855 // set, update the alignment.
9856 if (isa<MemTransferInst>(MI)) {
9857 if (Instruction *I = SimplifyMemTransfer(MI))
9859 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
9860 if (Instruction *I = SimplifyMemSet(MSI))
9864 if (Changed) return II;
9867 switch (II->getIntrinsicID()) {
9869 case Intrinsic::bswap:
9870 // bswap(bswap(x)) -> x
9871 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
9872 if (Operand->getIntrinsicID() == Intrinsic::bswap)
9873 return ReplaceInstUsesWith(CI, Operand->getOperand(1));
9875 case Intrinsic::uadd_with_overflow: {
9876 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
9877 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
9878 uint32_t BitWidth = IT->getBitWidth();
9879 APInt Mask = APInt::getSignBit(BitWidth);
9880 APInt LHSKnownZero, LHSKnownOne, RHSKnownZero, RHSKnownOne;
9881 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
9882 bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
9883 bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
9885 if (LHSKnownNegative || LHSKnownPositive) {
9886 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
9887 bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
9888 bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
9889 if (LHSKnownNegative && RHSKnownNegative) {
9890 // The sign bit is set in both cases: this MUST overflow.
9891 // Create a simple add instruction, and insert it into the struct.
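// Note (added for clarity; not in the original source): with the sign bit
// known set in both operands, each is at least 2^(BitWidth-1), so the unsigned
// sum is at least 2^BitWidth and must wrap; the overflow flag folds to true.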
9892 Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
9895 V[0] = UndefValue::get(LHS->getType());
9896 V[1] = ConstantInt::getTrue(*Context);
9897 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
9898 return InsertValueInst::Create(Struct, Add, 0);
9901 if (LHSKnownPositive && RHSKnownPositive) {
9902 // The sign bit is clear in both cases: this CANNOT overflow.
9903 // Create a simple add instruction, and insert it into the struct.
9904 Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
9907 V[0] = UndefValue::get(LHS->getType());
9908 V[1] = ConstantInt::getFalse(*Context);
9909 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
9910 return InsertValueInst::Create(Struct, Add, 0);
9914 // FALL THROUGH uadd into sadd
9915 case Intrinsic::sadd_with_overflow:
9916 // Canonicalize constants into the RHS.
9917 if (isa<Constant>(II->getOperand(1)) &&
9918 !isa<Constant>(II->getOperand(2))) {
9919 Value *LHS = II->getOperand(1);
9920 II->setOperand(1, II->getOperand(2));
9921 II->setOperand(2, LHS);
9925 // X + undef -> undef
9926 if (isa<UndefValue>(II->getOperand(2)))
9927 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
9929 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
9930 // X + 0 -> {X, false}
9931 if (RHS->isZero()) {
9933 UndefValue::get(II->getType()), ConstantInt::getFalse(*Context)
9935 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
9936 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
9940 case Intrinsic::usub_with_overflow:
9941 case Intrinsic::ssub_with_overflow:
9942 // undef - X -> undef
9943 // X - undef -> undef
9944 if (isa<UndefValue>(II->getOperand(1)) ||
9945 isa<UndefValue>(II->getOperand(2)))
9946 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
9948 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
9949 // X - 0 -> {X, false}
9950 if (RHS->isZero()) {
9952 UndefValue::get(II->getType()), ConstantInt::getFalse(*Context)
9954 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
9955 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
9959 case Intrinsic::umul_with_overflow:
9960 case Intrinsic::smul_with_overflow:
9961 // Canonicalize constants into the RHS.
9962 if (isa<Constant>(II->getOperand(1)) &&
9963 !isa<Constant>(II->getOperand(2))) {
9964 Value *LHS = II->getOperand(1);
9965 II->setOperand(1, II->getOperand(2));
9966 II->setOperand(2, LHS);
9970 // X * undef -> undef
9971 if (isa<UndefValue>(II->getOperand(2)))
9972 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
9974 if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
9975 // X*0 -> {0, false}
9977 return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
9979 // X * 1 -> {X, false}
9980 if (RHSI->equalsInt(1)) {
9982 V[0] = UndefValue::get(II->getType());
9983 V[1] = ConstantInt::getFalse(*Context);
9984 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
9985 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
9989 case Intrinsic::ppc_altivec_lvx:
9990 case Intrinsic::ppc_altivec_lvxl:
9991 case Intrinsic::x86_sse_loadu_ps:
9992 case Intrinsic::x86_sse2_loadu_pd:
9993 case Intrinsic::x86_sse2_loadu_dq:
9994 // Turn PPC lvx -> load if the pointer is known aligned.
9995 // Turn X86 loadups -> load if the pointer is known aligned.
9996 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
9997 Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
9998 PointerType::getUnqual(II->getType()));
9999 return new LoadInst(Ptr);
10002 case Intrinsic::ppc_altivec_stvx:
10003 case Intrinsic::ppc_altivec_stvxl:
10004 // Turn stvx -> store if the pointer is known aligned.
10005 if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
10006 const Type *OpPtrTy =
10007 PointerType::getUnqual(II->getOperand(1)->getType());
10008 Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
10009 return new StoreInst(II->getOperand(1), Ptr);
10012 case Intrinsic::x86_sse_storeu_ps:
10013 case Intrinsic::x86_sse2_storeu_pd:
10014 case Intrinsic::x86_sse2_storeu_dq:
10015 // Turn X86 storeu -> store if the pointer is known aligned.
10016 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
10017 const Type *OpPtrTy =
10018 PointerType::getUnqual(II->getOperand(2)->getType());
10019 Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
10020 return new StoreInst(II->getOperand(2), Ptr);
10024 case Intrinsic::x86_sse_cvttss2si: {
10025 // These intrinsics only demand the 0th element of their input vector. If
10026 // we can simplify the input based on that, do so now.
10028 cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
10029 APInt DemandedElts(VWidth, 1);
10030 APInt UndefElts(VWidth, 0);
10031 if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
10033 II->setOperand(1, V);
10039 case Intrinsic::ppc_altivec_vperm:
10040 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
10041 if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
10042 assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
10044 // Check that all of the elements are integer constants or undefs.
10045 bool AllEltsOk = true;
10046 for (unsigned i = 0; i != 16; ++i) {
10047 if (!isa<ConstantInt>(Mask->getOperand(i)) &&
10048 !isa<UndefValue>(Mask->getOperand(i))) {
10055 // Cast the input vectors to byte vectors.
10056 Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
10057 Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
10058 Value *Result = UndefValue::get(Op0->getType());
10060 // Only extract each element once.
10061 Value *ExtractedElts[32];
10062 memset(ExtractedElts, 0, sizeof(ExtractedElts));
10064 for (unsigned i = 0; i != 16; ++i) {
10065 if (isa<UndefValue>(Mask->getOperand(i)))
10067 unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
10068 Idx &= 31; // Match the hardware behavior.
10070 if (ExtractedElts[Idx] == 0) {
10071 ExtractedElts[Idx] =
10072 Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
10073 ConstantInt::get(Type::getInt32Ty(*Context), Idx&15, false),
10077 // Insert this value into the result vector.
10078 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
10079 ConstantInt::get(Type::getInt32Ty(*Context), i, false),
10082 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
10087 case Intrinsic::stackrestore: {
10088 // If the save is right next to the restore, remove the restore. This can
10089 // happen when variable allocas are DCE'd.
10090 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
10091 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
10092 BasicBlock::iterator BI = SS;
10094 return EraseInstFromFunction(CI);
10098 // Scan down this block to see if there is another stack restore in the
10099 // same block without an intervening call/alloca.
10100 BasicBlock::iterator BI = II;
10101 TerminatorInst *TI = II->getParent()->getTerminator();
10102 bool CannotRemove = false;
10103 for (++BI; &*BI != TI; ++BI) {
10104 if (isa<AllocaInst>(BI) || isMalloc(BI)) {
10105 CannotRemove = true;
10108 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
10109 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
10110 // If there is a stackrestore below this one, remove this one.
10111 if (II->getIntrinsicID() == Intrinsic::stackrestore)
10112 return EraseInstFromFunction(CI);
10113 // Otherwise, ignore the intrinsic.
10115 // If we found a non-intrinsic call, we can't remove the stack restore.
10117 CannotRemove = true;
10123 // If the stack restore is in a return/unwind block and if there are no
10124 // allocas or calls between the restore and the return, nuke the restore.
10125 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
10126 return EraseInstFromFunction(CI);
10131 return visitCallSite(II);
10134 // InvokeInst simplification
10136 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
10137 return visitCallSite(&II);
10140 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
10141 /// passed through the varargs area, we can eliminate the use of the cast.
10142 static bool isSafeToEliminateVarargsCast(const CallSite CS,
10143 const CastInst * const CI,
10144 const TargetData * const TD,
10146 if (!CI->isLosslessCast())
10149 // The size of ByVal arguments is derived from the type, so we
10150 // can't change to a type with a different size. If the size were
10151 // passed explicitly we could avoid this check.
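// Note (added for clarity; not in the original source): e.g. a byval argument
// bitcast from %struct.small* to %struct.big* would change how many bytes the
// callee copies, so only same-size casts are eliminated here.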
10152 if (!CS.paramHasAttr(ix, Attribute::ByVal))
10155 const Type* SrcTy =
10156 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
10157 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
10158 if (!SrcTy->isSized() || !DstTy->isSized())
10160 if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
10165 // visitCallSite - Improvements for call and invoke instructions.
10167 Instruction *InstCombiner::visitCallSite(CallSite CS) {
10168 bool Changed = false;
10170 // If the callee is a constexpr cast of a function, attempt to move the cast
10171 // to the arguments of the call/invoke.
10172 if (transformConstExprCastCall(CS)) return 0;
10174 Value *Callee = CS.getCalledValue();
10176 if (Function *CalleeF = dyn_cast<Function>(Callee))
10177 if (CalleeF->getCallingConv() != CS.getCallingConv()) {
10178 Instruction *OldCall = CS.getInstruction();
10179 // If the call and callee calling conventions don't match, this call must
10180 // be unreachable, as the call is undefined.
10181 new StoreInst(ConstantInt::getTrue(*Context),
10182 UndefValue::get(Type::getInt1PtrTy(*Context)),
10184 // If OldCall does not return void then replaceAllUsesWith undef.
10185 // This allows ValueHandlers and custom metadata to adjust themselves.
10186 if (!OldCall->getType()->isVoidTy())
10187 OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
10188 if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
10189 return EraseInstFromFunction(*OldCall);
10193 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
10194 // This instruction is not reachable, just remove it. We insert a store to
10195 // undef so that we know that this code is not reachable, despite the fact
10196 // that we can't modify the CFG here.
10197 new StoreInst(ConstantInt::getTrue(*Context),
10198 UndefValue::get(Type::getInt1PtrTy(*Context)),
10199 CS.getInstruction());
10201 // If CS does not return void then replaceAllUsesWith undef.
10202 // This allows ValueHandlers and custom metadata to adjust themselves.
10203 if (!CS.getInstruction()->getType()->isVoidTy())
10204 CS.getInstruction()->
10205 replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));
10207 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
10208 // Don't break the CFG, insert a dummy cond branch.
10209 BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
10210 ConstantInt::getTrue(*Context), II);
10212 return EraseInstFromFunction(*CS.getInstruction());
10215 if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
10216 if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
10217 if (In->getIntrinsicID() == Intrinsic::init_trampoline)
10218 return transformCallThroughTrampoline(CS);
10220 const PointerType *PTy = cast<PointerType>(Callee->getType());
10221 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
10222 if (FTy->isVarArg()) {
10223 int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
10224 // See if we can optimize any arguments passed through the varargs area of the call.
10226 for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
10227 E = CS.arg_end(); I != E; ++I, ++ix) {
10228 CastInst *CI = dyn_cast<CastInst>(*I);
10229 if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
10230 *I = CI->getOperand(0);
10236 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
10237 // Inline asm calls cannot throw - mark them 'nounwind'.
10238 CS.setDoesNotThrow();
10242 return Changed ? CS.getInstruction() : 0;
10245 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
10246 // attempt to move the cast to the arguments of the call/invoke.
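// Illustrative example (added for clarity; not in the original source):
//   %r = call i32 bitcast (i8 (i32)* @f to i32 (i32)*)(i32 %x)
// becomes a direct call to @f, with the argument and result casts inserted
// explicitly where they are legal.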
10248 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
10249 if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
10250 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
10251 if (CE->getOpcode() != Instruction::BitCast ||
10252 !isa<Function>(CE->getOperand(0)))
10254 Function *Callee = cast<Function>(CE->getOperand(0));
10255 Instruction *Caller = CS.getInstruction();
10256 const AttrListPtr &CallerPAL = CS.getAttributes();
10258 // Okay, this is a cast from a function to a different type. Unless doing so
10259 // would cause a type conversion of one of our arguments, change this call to
10260 // be a direct call with arguments casted to the appropriate types.
10262 const FunctionType *FT = Callee->getFunctionType();
10263 const Type *OldRetTy = Caller->getType();
10264 const Type *NewRetTy = FT->getReturnType();
10266 if (isa<StructType>(NewRetTy))
10267 return false; // TODO: Handle multiple return values.
10269 // Check to see if we are changing the return type...
10270 if (OldRetTy != NewRetTy) {
10271 if (Callee->isDeclaration() &&
10272 // Conversion is ok if changing from one pointer type to another or from
10273 // a pointer to an integer of the same size.
10274 !((isa<PointerType>(OldRetTy) || !TD ||
10275 OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
10276 (isa<PointerType>(NewRetTy) || !TD ||
10277 NewRetTy == TD->getIntPtrType(Caller->getContext()))))
10278 return false; // Cannot transform this return value.
10280 if (!Caller->use_empty() &&
10281 // void -> non-void is handled specially
10282 !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
10283 return false; // Cannot transform this return value.
10285 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
10286 Attributes RAttrs = CallerPAL.getRetAttributes();
10287 if (RAttrs & Attribute::typeIncompatible(NewRetTy))
10288 return false; // Attribute not compatible with transformed value.
10291 // If the callsite is an invoke instruction, and the return value is used by
10292 // a PHI node in a successor, we cannot change the return type of the call
10293 // because there is no place to put the cast instruction (without breaking
10294 // the critical edge). Bail out in this case.
10295 if (!Caller->use_empty())
10296 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
10297 for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
10299 if (PHINode *PN = dyn_cast<PHINode>(*UI))
10300 if (PN->getParent() == II->getNormalDest() ||
10301 PN->getParent() == II->getUnwindDest())
10305 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
10306 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
10308 CallSite::arg_iterator AI = CS.arg_begin();
10309 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
10310 const Type *ParamTy = FT->getParamType(i);
10311 const Type *ActTy = (*AI)->getType();
10313 if (!CastInst::isCastable(ActTy, ParamTy))
10314 return false; // Cannot transform this parameter value.
10316 if (CallerPAL.getParamAttributes(i + 1)
10317 & Attribute::typeIncompatible(ParamTy))
10318 return false; // Attribute not compatible with transformed value.
10320 // Converting from one pointer type to another or between a pointer and an
10321 // integer of the same size is safe even if we do not have a body.
10322 bool isConvertible = ActTy == ParamTy ||
10323 (TD && ((isa<PointerType>(ParamTy) ||
10324 ParamTy == TD->getIntPtrType(Caller->getContext())) &&
10325 (isa<PointerType>(ActTy) ||
10326 ActTy == TD->getIntPtrType(Caller->getContext()))));
10327 if (Callee->isDeclaration() && !isConvertible) return false;
10330 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
10331 Callee->isDeclaration())
10332 return false; // Do not delete arguments unless we have a function body.
10334 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
10335 !CallerPAL.isEmpty())
10336 // In this case we have more arguments than the new function type, but we
10337 // won't be dropping them. Check that these extra arguments have attributes
10338 // that are compatible with being a vararg call argument.
10339 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
10340 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
10342 Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
10343 if (PAttrs & Attribute::VarArgsIncompatible)
10347 // Okay, we decided that this is a safe thing to do: go ahead and start
10348 // inserting cast instructions as necessary...
10349 std::vector<Value*> Args;
10350 Args.reserve(NumActualArgs);
10351 SmallVector<AttributeWithIndex, 8> attrVec;
10352 attrVec.reserve(NumCommonArgs);
10354 // Get any return attributes.
10355 Attributes RAttrs = CallerPAL.getRetAttributes();
10357 // If the return value is not being used, the type may not be compatible
10358 // with the existing attributes. Wipe out any problematic attributes.
10359 RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
10361 // Add the new return attributes.
10363 attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
10365 AI = CS.arg_begin();
10366 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
10367 const Type *ParamTy = FT->getParamType(i);
10368 if ((*AI)->getType() == ParamTy) {
10369 Args.push_back(*AI);
10371 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
10372 false, ParamTy, false);
10373 Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
10376 // Add any parameter attributes.
10377 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
10378 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
10381 // If the function takes more arguments than the call was taking, add them now.
10383 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
10384 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
10386 // If we are removing arguments to the function, emit an obnoxious warning.
10387 if (FT->getNumParams() < NumActualArgs) {
10388 if (!FT->isVarArg()) {
10389 errs() << "WARNING: While resolving call to function '"
10390 << Callee->getName() << "' arguments were dropped!\n";
10392 // Add all of the arguments in their promoted form to the arg list.
10393 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
10394 const Type *PTy = getPromotedType((*AI)->getType());
10395 if (PTy != (*AI)->getType()) {
10396 // Must promote to pass through va_arg area!
10397 Instruction::CastOps opcode =
10398 CastInst::getCastOpcode(*AI, false, PTy, false);
10399 Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
10401 Args.push_back(*AI);
10404 // Add any parameter attributes.
10405 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
10406 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
10411 if (Attributes FnAttrs = CallerPAL.getFnAttributes())
10412 attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
10414 if (NewRetTy->isVoidTy())
10415 Caller->setName(""); // Void type should not have a name.
10417 const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
10421 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10422 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
10423 Args.begin(), Args.end(),
10424 Caller->getName(), Caller);
10425 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
10426 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
10428 NC = CallInst::Create(Callee, Args.begin(), Args.end(),
10429 Caller->getName(), Caller);
10430 CallInst *CI = cast<CallInst>(Caller);
10431 if (CI->isTailCall())
10432 cast<CallInst>(NC)->setTailCall();
10433 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
10434 cast<CallInst>(NC)->setAttributes(NewCallerPAL);
10437 // Insert a cast of the return type as necessary.
10439 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
10440 if (!NV->getType()->isVoidTy()) {
10441 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
10443 NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
10445 // If this is an invoke instruction, we should insert it after the first
10446 // non-phi instruction in the normal successor block.
10447 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10448 BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
10449 InsertNewInstBefore(NC, *I);
10451 // Otherwise, it's a call; just insert the cast right after the call instruction.
10452 InsertNewInstBefore(NC, *Caller);
10454 Worklist.AddUsersToWorkList(*Caller);
10456 NV = UndefValue::get(Caller->getType());
10461 if (!Caller->use_empty())
10462 Caller->replaceAllUsesWith(NV);
10464 EraseInstFromFunction(*Caller);
10468 // transformCallThroughTrampoline - Turn a call to a function created by the
10469 // init_trampoline intrinsic into a direct call to the underlying function.
10471 Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
10472 Value *Callee = CS.getCalledValue();
10473 const PointerType *PTy = cast<PointerType>(Callee->getType());
10474 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
10475 const AttrListPtr &Attrs = CS.getAttributes();
10477 // If the call already has the 'nest' attribute somewhere then give up -
10478 // otherwise 'nest' would occur twice after splicing in the chain.
10479 if (Attrs.hasAttrSomewhere(Attribute::Nest))
10482 IntrinsicInst *Tramp =
10483 cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
10485 Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
10486 const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
10487 const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
10489 const AttrListPtr &NestAttrs = NestF->getAttributes();
10490 if (!NestAttrs.isEmpty()) {
10491 unsigned NestIdx = 1;
10492 const Type *NestTy = 0;
10493 Attributes NestAttr = Attribute::None;
10495 // Look for a parameter marked with the 'nest' attribute.
10496 for (FunctionType::param_iterator I = NestFTy->param_begin(),
10497 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
10498 if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
10499 // Record the parameter type and any other attributes.
10501 NestAttr = NestAttrs.getParamAttributes(NestIdx);
10506 Instruction *Caller = CS.getInstruction();
10507 std::vector<Value*> NewArgs;
10508 NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
10510 SmallVector<AttributeWithIndex, 8> NewAttrs;
10511 NewAttrs.reserve(Attrs.getNumSlots() + 1);
10513 // Insert the nest argument into the call argument list, which may
10514 // mean appending it. Likewise for attributes.
10516 // Add any result attributes.
10517 if (Attributes Attr = Attrs.getRetAttributes())
10518 NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
10522 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
10524 if (Idx == NestIdx) {
10525 // Add the chain argument and attributes.
10526 Value *NestVal = Tramp->getOperand(3);
10527 if (NestVal->getType() != NestTy)
10528 NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
10529 NewArgs.push_back(NestVal);
10530 NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
10536 // Add the original argument and attributes.
10537 NewArgs.push_back(*I);
10538 if (Attributes Attr = Attrs.getParamAttributes(Idx))
10540 (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
10546 // Add any function attributes.
10547 if (Attributes Attr = Attrs.getFnAttributes())
10548 NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
10550 // The trampoline may have been bitcast to a bogus type (FTy).
10551 // Handle this by synthesizing a new function type, equal to FTy
10552 // with the chain parameter inserted.
10554 std::vector<const Type*> NewTypes;
10555 NewTypes.reserve(FTy->getNumParams()+1);
10557 // Insert the chain's type into the list of parameter types, which may
10558 // mean appending it.
10561 FunctionType::param_iterator I = FTy->param_begin(),
10562 E = FTy->param_end();
10565 if (Idx == NestIdx)
10566 // Add the chain's type.
10567 NewTypes.push_back(NestTy);
10572 // Add the original type.
10573 NewTypes.push_back(*I);
10579 // Replace the trampoline call with a direct call. Let the generic
10580 // code sort out any function type mismatches.
10581 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
10583 Constant *NewCallee =
10584 NestF->getType() == PointerType::getUnqual(NewFTy) ?
10585 NestF : ConstantExpr::getBitCast(NestF,
10586 PointerType::getUnqual(NewFTy));
10587 const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
10590 Instruction *NewCaller;
10591 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10592 NewCaller = InvokeInst::Create(NewCallee,
10593 II->getNormalDest(), II->getUnwindDest(),
10594 NewArgs.begin(), NewArgs.end(),
10595 Caller->getName(), Caller);
10596 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
10597 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
10599 NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
10600 Caller->getName(), Caller);
10601 if (cast<CallInst>(Caller)->isTailCall())
10602 cast<CallInst>(NewCaller)->setTailCall();
10603 cast<CallInst>(NewCaller)->
10604 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
10605 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
10607 if (!Caller->getType()->isVoidTy())
10608 Caller->replaceAllUsesWith(NewCaller);
10609 Caller->eraseFromParent();
10610 Worklist.Remove(Caller);
10615 // Replace the trampoline call with a direct call. Since there is no 'nest'
10616 // parameter, there is no need to adjust the argument list. Let the generic
10617 // code sort out any function type mismatches.
10618 Constant *NewCallee =
10619 NestF->getType() == PTy ? NestF :
10620 ConstantExpr::getBitCast(NestF, PTy);
10621 CS.setCalledFunction(NewCallee);
10622 return CS.getInstruction();
10625 /// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
10626 /// and if a/b/c and the add's all have a single use, turn this into a phi
10627 /// and a single binop.
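// Illustrative example (added for clarity; not in the original source):
//   %p = phi i32 [ %a1, %bb1 ], [ %a2, %bb2 ]
// where %a1 = add i32 %x, %y1 and %a2 = add i32 %x, %y2, becomes
//   %y.pn = phi i32 [ %y1, %bb1 ], [ %y2, %bb2 ]
//   %p    = add i32 %x, %y.pn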
10628 Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
10629 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
10630 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
10631 unsigned Opc = FirstInst->getOpcode();
10632 Value *LHSVal = FirstInst->getOperand(0);
10633 Value *RHSVal = FirstInst->getOperand(1);
10635 const Type *LHSType = LHSVal->getType();
10636 const Type *RHSType = RHSVal->getType();
10638 // Scan to see if all operands are the same opcode, and all have one use.
10639 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
10640 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
10641 if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
10642 // Verify type of the LHS matches so we don't fold cmp's of different
10643 // types or GEP's with different index types.
10644 I->getOperand(0)->getType() != LHSType ||
10645 I->getOperand(1)->getType() != RHSType)
10648 // If they are CmpInst instructions, check their predicates
10649 if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
10650 if (cast<CmpInst>(I)->getPredicate() !=
10651 cast<CmpInst>(FirstInst)->getPredicate())
10654 // Keep track of which operand needs a phi node.
10655 if (I->getOperand(0) != LHSVal) LHSVal = 0;
10656 if (I->getOperand(1) != RHSVal) RHSVal = 0;
10659 // If both LHS and RHS would need a PHI, don't do this transformation,
10660 // because it would increase the number of PHIs entering the block,
10661 // which leads to higher register pressure. This is especially
10662 // bad when the PHIs are in the header of a loop.
10663 if (!LHSVal && !RHSVal)
10666 // Otherwise, this is safe to transform!
10668 Value *InLHS = FirstInst->getOperand(0);
10669 Value *InRHS = FirstInst->getOperand(1);
10670 PHINode *NewLHS = 0, *NewRHS = 0;
10672 NewLHS = PHINode::Create(LHSType,
10673 FirstInst->getOperand(0)->getName() + ".pn");
10674 NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
10675 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
10676 InsertNewInstBefore(NewLHS, PN);
10681 NewRHS = PHINode::Create(RHSType,
10682 FirstInst->getOperand(1)->getName() + ".pn");
10683 NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
10684 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
10685 InsertNewInstBefore(NewRHS, PN);
10689 // Add all operands to the new PHIs.
10690 if (NewLHS || NewRHS) {
10691 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10692 Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
10694 Value *NewInLHS = InInst->getOperand(0);
10695 NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
10698 Value *NewInRHS = InInst->getOperand(1);
10699 NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
10704 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
10705 return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
10706 CmpInst *CIOp = cast<CmpInst>(FirstInst);
10707 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
10711 Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
10712 GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
10714 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
10715 FirstInst->op_end());
10716 // This is true if all GEP bases are allocas and if all indices into them are constants.
10718 bool AllBasePointersAreAllocas = true;
10720 // We don't want to replace this phi if the replacement would require
10721 // more than one phi, which leads to higher register pressure. This is
10722 // especially bad when the PHIs are in the header of a loop.
10723 bool NeededPhi = false;
10725 // Scan to see if all operands are the same opcode, and all have one use.
10726 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
10727 GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
10728 if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
10729 GEP->getNumOperands() != FirstInst->getNumOperands())
10732 // Keep track of whether or not all GEPs are of alloca pointers.
10733 if (AllBasePointersAreAllocas &&
10734 (!isa<AllocaInst>(GEP->getOperand(0)) ||
10735 !GEP->hasAllConstantIndices()))
10736 AllBasePointersAreAllocas = false;
10738 // Compare the operand lists.
10739 for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
10740 if (FirstInst->getOperand(op) == GEP->getOperand(op))
10743 // Don't merge two GEPs when two operands differ (introducing phi nodes)
10744 // if one of the PHIs has a constant for the index. The index may be
10745 // substantially cheaper to compute for the constants, so making it a
10746 // variable index could pessimize the path. This also handles the case
10747 // for struct indices, which must always be constant.
10748 if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
10749 isa<ConstantInt>(GEP->getOperand(op)))
10752 if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
10755 // If we already needed a PHI for an earlier operand, and another operand
10756 // also requires a PHI, we'd be introducing more PHIs than we're
10757 // eliminating, which increases register pressure on entry to the PHI's block.
10762 FixedOperands[op] = 0; // Needs a PHI.
10767 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
10768 // bother doing this transformation. At best, this will just save a bit of
10769 // offset calculation, but all the predecessors will have to materialize the
10770 // stack address into a register anyway. We'd actually rather *clone* the
10771 // load up into the predecessors so that we have a load of a gep of an alloca,
10772 // which can usually all be folded into the load.
10773 if (AllBasePointersAreAllocas)
10776 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
10777 // that is variable.
10778 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
10780 bool HasAnyPHIs = false;
10781 for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
10782 if (FixedOperands[i]) continue; // operand doesn't need a phi.
10783 Value *FirstOp = FirstInst->getOperand(i);
10784 PHINode *NewPN = PHINode::Create(FirstOp->getType(),
10785 FirstOp->getName()+".pn");
10786 InsertNewInstBefore(NewPN, PN);
10788 NewPN->reserveOperandSpace(e);
10789 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
10790 OperandPhis[i] = NewPN;
10791 FixedOperands[i] = NewPN;
10796 // Add all operands to the new PHIs.
10798 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10799 GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
10800 BasicBlock *InBB = PN.getIncomingBlock(i);
10802 for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
10803 if (PHINode *OpPhi = OperandPhis[op])
10804 OpPhi->addIncoming(InGEP->getOperand(op), InBB);
10808 Value *Base = FixedOperands[0];
10809 return cast<GEPOperator>(FirstInst)->isInBounds() ?
10810 GetElementPtrInst::CreateInBounds(Base, FixedOperands.begin()+1,
10811 FixedOperands.end()) :
10812 GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
10813 FixedOperands.end());
10817 /// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
10818 /// sink the load out of the block that defines it. This means that it must be
10819 /// obvious the value of the load is not changed from the point of the load to
10820 /// the end of the block it is in.
10822 /// Finally, it is safe, but not profitable, to sink a load targeting a
10823 /// non-address-taken alloca. Doing so will cause us to not promote the alloca to a register.
10825 static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
10826 BasicBlock::iterator BBI = L, E = L->getParent()->end();
10828 for (++BBI; BBI != E; ++BBI)
10829 if (BBI->mayWriteToMemory())
10832 // Check for non-address taken alloca. If not address-taken already, it isn't
10833 // profitable to do this xform.
10834 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
10835 bool isAddressTaken = false;
10836 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
10838 if (isa<LoadInst>(UI)) continue;
10839 if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
10840 // If storing TO the alloca, then the address isn't taken.
10841 if (SI->getOperand(1) == AI) continue;
10843 isAddressTaken = true;
10847 if (!isAddressTaken && AI->isStaticAlloca())
10851 // If this load is a load from a GEP with a constant offset from an alloca,
10852 // then we don't want to sink it. In its present form, it will be
10853 // load [constant stack offset]. Sinking it will cause us to have to
10854 // materialize the stack addresses in each predecessor in a register only to
10855 // do a shared load from register in the successor.
10856 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
10857 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
10858 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
10864 Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
10865 LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
10867 // When processing loads, we need to propagate two bits of information to the
10868 // sunk load: whether it is volatile, and what its alignment is. We currently
10869 // don't sink loads when some have their alignment specified and some don't.
10870 // visitLoadInst will propagate an alignment onto the load when TD is around,
10871 // and if TD isn't around, we can't handle the mixed case.
10872 bool isVolatile = FirstLI->isVolatile();
10873 unsigned LoadAlignment = FirstLI->getAlignment();
10875 // We can't sink the load if the loaded value could be modified between the
10876 // load and the PHI.
10877 if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
10878 !isSafeAndProfitableToSinkLoad(FirstLI))
10881 // If the PHI is of volatile loads and the load block has multiple
10882 // successors, sinking it would remove a load of the volatile value from
10883 // the path through the other successor.
10885 FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
10888 // Check to see if all arguments are the same operation.
10889 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10890 LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
10891 if (!LI || !LI->hasOneUse())
10894 // We can't sink the load if the loaded value could be modified between
10895 // the load and the PHI.
10896 if (LI->isVolatile() != isVolatile ||
10897 LI->getParent() != PN.getIncomingBlock(i) ||
10898 !isSafeAndProfitableToSinkLoad(LI))
10901 // If some of the loads have an alignment specified but not all of them,
10902 // we can't do the transformation.
10903 if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
10906 LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
10908 // If the PHI is of volatile loads and the load block has multiple
10909 // successors, sinking it would remove a load of the volatile value from
10910 // the path through the other successor.
10912 LI->getParent()->getTerminator()->getNumSuccessors() != 1)
10916 // Okay, they are all the same operation. Create a new PHI node of the
10917 // correct type, and PHI together all of the LHS's of the instructions.
10918 PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
10919 PN.getName()+".in");
10920 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
10922 Value *InVal = FirstLI->getOperand(0);
10923 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
10925 // Add all operands to the new PHI.
10926 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10927 Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
10928 if (NewInVal != InVal)
10930 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
10935 // The new PHI unions all of the same values together. This is really
10936 // common, so we handle it intelligently here for compile-time speed.
10940 InsertNewInstBefore(NewPN, PN);
10944 // If this was a volatile load that we are merging, make sure to loop through
10945 // and mark all the input loads as non-volatile. If we don't do this, we will
10946 // insert a new volatile load and the old ones will not be deletable.
10948 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
10949 cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
10951 return new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
10956 /// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
10957 /// operator and they all are only used by the PHI, PHI together their
10958 /// inputs and perform the operation once, on the result of the PHI.
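// Illustrative example (added for clarity; not in the original source):
//   %p = phi i16 [ %t1, %bb1 ], [ %t2, %bb2 ]
// where %t1 = trunc i32 %a to i16 and %t2 = trunc i32 %b to i16, becomes
//   %p.in = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
//   %p    = trunc i32 %p.in to i16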
10959 Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
10960 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
10962 if (isa<GetElementPtrInst>(FirstInst))
10963 return FoldPHIArgGEPIntoPHI(PN);
10964 if (isa<LoadInst>(FirstInst))
10965 return FoldPHIArgLoadIntoPHI(PN);
10967 // Scan the instruction, looking for input operations that can be folded away.
10968 // If all input operands to the phi are the same instruction (e.g. a cast from
10969 // the same type or "+42") we can pull the operation through the PHI, reducing
10970 // code size and simplifying code.
10971 Constant *ConstantOp = 0;
10972 const Type *CastSrcTy = 0;
10974 if (isa<CastInst>(FirstInst)) {
10975 CastSrcTy = FirstInst->getOperand(0)->getType();
10977 // Be careful about transforming integer PHIs. We don't want to pessimize
10978 // the code by turning an i32 into an i1293.
10979 if (isa<IntegerType>(PN.getType()) && isa<IntegerType>(CastSrcTy)) {
10980 if (!ShouldChangeType(PN.getType(), CastSrcTy, TD))
10983 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
10984 // Can fold a binop, compare or shift here if the RHS is a constant;
10985 // otherwise call FoldPHIArgBinOpIntoPHI.
10986 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
10987 if (ConstantOp == 0)
10988 return FoldPHIArgBinOpIntoPHI(PN);
10990 return 0; // Cannot fold this operation.
10993 // Check to see if all arguments are the same operation.
10994 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10995 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
10996 if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
10999 if (I->getOperand(0)->getType() != CastSrcTy)
11000 return 0; // Cast operation must match.
11001 } else if (I->getOperand(1) != ConstantOp) {
11006 // Okay, they are all the same operation. Create a new PHI node of the
11007 // correct type, and PHI together all of the LHS's of the instructions.
11008 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
11009 PN.getName()+".in");
11010 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
11012 Value *InVal = FirstInst->getOperand(0);
11013 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
11015 // Add all operands to the new PHI.
11016 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
11017 Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
11018 if (NewInVal != InVal)
11020 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
11025 // The new PHI unions all of the same values together. This is really
11026 // common, so we handle it intelligently here for compile-time speed.
11030 InsertNewInstBefore(NewPN, PN);
11034 // Insert and return the new operation.
11035 if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
11036 return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
11038 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
11039 return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
11041 CmpInst *CIOp = cast<CmpInst>(FirstInst);
11042 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
11043 PhiVal, ConstantOp);
11046 /// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle that is dead.
11048 static bool DeadPHICycle(PHINode *PN,
11049 SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
11050 if (PN->use_empty()) return true;
11051 if (!PN->hasOneUse()) return false;
11053 // Remember this node, and if we find the cycle, return.
11054 if (!PotentiallyDeadPHIs.insert(PN))
11057 // Don't scan crazily complex things.
11058 if (PotentiallyDeadPHIs.size() == 16)
11061 if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
11062 return DeadPHICycle(PU, PotentiallyDeadPHIs);
11067 /// PHIsEqualValue - Return true if this phi node is always equal to
11068 /// NonPhiInVal. This happens with mutually cyclic phi nodes like:
11069 /// z = some value; x = phi (y, z); y = phi (x, z)
11070 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
11071 SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
11072 // See if we already saw this PHI node.
11073 if (!ValueEqualPHIs.insert(PN))
11076 // Don't scan crazily complex things.
11077 if (ValueEqualPHIs.size() == 16)
11080 // Scan the operands to see if they are either phi nodes or are equal to
11082 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
11083 Value *Op = PN->getIncomingValue(i);
11084 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
11085 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
11087 } else if (Op != NonPhiInVal)
11096 struct PHIUsageRecord {
10997 unsigned PHIId; // The ID # of the PHI (something deterministic to sort on)
11098 unsigned Shift; // The amount shifted.
11099 Instruction *Inst; // The trunc instruction.
11101 PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
11102 : PHIId(pn), Shift(Sh), Inst(User) {}
11104 bool operator<(const PHIUsageRecord &RHS) const {
11105 if (PHIId < RHS.PHIId) return true;
11106 if (PHIId > RHS.PHIId) return false;
11107 if (Shift < RHS.Shift) return true;
11108 if (Shift > RHS.Shift) return false;
11109 return Inst->getType()->getPrimitiveSizeInBits() <
11110 RHS.Inst->getType()->getPrimitiveSizeInBits();
11114 struct LoweredPHIRecord {
11115 PHINode *PN; // The PHI that was lowered.
11116 unsigned Shift; // The amount shifted.
11117 unsigned Width; // The width extracted.
11119 LoweredPHIRecord(PHINode *pn, unsigned Sh, const Type *Ty)
11120 : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
11122 // Ctor form used by DenseMap.
11123 LoweredPHIRecord(PHINode *pn, unsigned Sh)
11124 : PN(pn), Shift(Sh), Width(0) {}
11130 struct DenseMapInfo<LoweredPHIRecord> {
11131 static inline LoweredPHIRecord getEmptyKey() {
11132 return LoweredPHIRecord(0, 0);
11134 static inline LoweredPHIRecord getTombstoneKey() {
11135 return LoweredPHIRecord(0, 1);
11137 static unsigned getHashValue(const LoweredPHIRecord &Val) {
11138 return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
11141 static bool isEqual(const LoweredPHIRecord &LHS,
11142 const LoweredPHIRecord &RHS) {
11143 return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
11144 LHS.Width == RHS.Width;
11146 static bool isPod() { return true; }
11151 /// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
11152 /// illegal type: see if it is only used by trunc or trunc(lshr) operations. If
11153 /// so, we split the PHI into the various pieces being extracted. This sort of
11154 /// thing is introduced when SROA promotes an aggregate to large integer values.
11156 /// TODO: The user of the trunc may be a bitcast to float/double/vector or an
11157 /// inttoptr. We should produce new PHIs in the right type.
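// An illustrative sketch (hypothetical IR, assuming a target where i64 is
// illegal and i32 is legal): a PHI such as
//   %p  = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
//   %lo = trunc i64 %p to i32
//   %hs = lshr i64 %p, 32
//   %hi = trunc i64 %hs to i32
// is sliced into two i32 PHIs, one per extracted piece, with the lshr/trunc
// "extract" sequences emitted in the predecessor blocks.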
11159 Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
11160 // PHIUsers - Keep track of all of the truncated values extracted from a set
11161 // of PHIs, along with their offset. These are the things we want to rewrite.
11162 SmallVector<PHIUsageRecord, 16> PHIUsers;
11164 // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
11165 // nodes which are extracted from. PHIsInspected is a set we use to avoid
11166 // revisiting PHIs, and PHIsToSlice is an ordered list of PHIs that we need to
11167 // check the uses of (to ensure they are all extracts).
11168 SmallVector<PHINode*, 8> PHIsToSlice;
11169 SmallPtrSet<PHINode*, 8> PHIsInspected;
11171 PHIsToSlice.push_back(&FirstPhi);
11172 PHIsInspected.insert(&FirstPhi);
11174 for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
11175 PHINode *PN = PHIsToSlice[PHIId];
11177 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
11179 Instruction *User = cast<Instruction>(*UI);
11181 // If the user is a PHI, inspect its uses recursively.
11182 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
11183 if (PHIsInspected.insert(UserPN))
11184 PHIsToSlice.push_back(UserPN);
11188 // Truncates are always ok.
11189 if (isa<TruncInst>(User)) {
11190 PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User));
11194 // Otherwise it must be a lshr which can only be used by one trunc.
11195 if (User->getOpcode() != Instruction::LShr ||
11196 !User->hasOneUse() || !isa<TruncInst>(User->use_back()) ||
11197 !isa<ConstantInt>(User->getOperand(1)))
11200 unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
11201 PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back()));
11205 // If we have no users, they must be all self uses, just nuke the PHI.
11206 if (PHIUsers.empty())
11207 return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
11209 // If this phi node is transformable, create new PHIs for all the pieces
11210 // extracted out of it. First, sort the users by their offset and size.
11211 array_pod_sort(PHIUsers.begin(), PHIUsers.end());
11213 DEBUG(errs() << "SLICING UP PHI: " << FirstPhi << '\n';
11214 for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
11215 errs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] <<'\n';
11218 // PredValues - This is a temporary used when rewriting PHI nodes. It is
11219 // hoisted out here to avoid construction/destruction thrashing.
11220 DenseMap<BasicBlock*, Value*> PredValues;
11222 // ExtractedVals - Each new PHI we introduce is saved here so we don't
11223 // introduce redundant PHIs.
11224 DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;
11226 for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
11227 unsigned PHIId = PHIUsers[UserI].PHIId;
11228 PHINode *PN = PHIsToSlice[PHIId];
11229 unsigned Offset = PHIUsers[UserI].Shift;
11230 const Type *Ty = PHIUsers[UserI].Inst->getType();
11234 // If we've already lowered a user like this, reuse the previously lowered
11236 if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) {
11238 // Otherwise, Create the new PHI node for this user.
11239 EltPHI = PHINode::Create(Ty, PN->getName()+".off"+Twine(Offset), PN);
11240 assert(EltPHI->getType() != PN->getType() &&
11241 "Truncate didn't shrink phi?");
11243 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
11244 BasicBlock *Pred = PN->getIncomingBlock(i);
11245 Value *&PredVal = PredValues[Pred];
11247 // If we already have a value for this predecessor, reuse it.
11249 EltPHI->addIncoming(PredVal, Pred);
11253 // Handle the PHI self-reuse case.
11254 Value *InVal = PN->getIncomingValue(i);
11257 EltPHI->addIncoming(PredVal, Pred);
11259 } else if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
11260 // If the incoming value was a PHI, and if it was one of the PHIs we
11261 // already rewrote, just use the lowered value.
11262 if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
11264 EltPHI->addIncoming(PredVal, Pred);
11269 // Otherwise, do an extract in the predecessor.
11270 Builder->SetInsertPoint(Pred, Pred->getTerminator());
11271 Value *Res = InVal;
11273 Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
11274 Offset), "extract");
11275 Res = Builder->CreateTrunc(Res, Ty, "extract.t");
11277 EltPHI->addIncoming(Res, Pred);
11279 // If the incoming value was a PHI, and if it was one of the PHIs we are
11280 // rewriting, we will ultimately delete the code we inserted. This
11281 // means we need to revisit that PHI to make sure we extract out the
11283 if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
11284 if (PHIsInspected.count(OldInVal)) {
11285 unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
11286 OldInVal)-PHIsToSlice.begin();
11287 PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
11288 cast<Instruction>(Res)));
11292 PredValues.clear();
11294 DEBUG(errs() << " Made element PHI for offset " << Offset << ": "
11295 << *EltPHI << '\n');
11296 ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
11299 // Replace the use of this piece with the PHI node.
11300 ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
11303 // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
11305 Value *Undef = UndefValue::get(FirstPhi.getType());
11306 for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
11307 ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
11308 return ReplaceInstUsesWith(FirstPhi, Undef);
11311 // PHINode simplification
11313 Instruction *InstCombiner::visitPHINode(PHINode &PN) {
11314 // If LCSSA is around, don't mess with Phi nodes
11315 if (MustPreserveLCSSA) return 0;
11317 if (Value *V = PN.hasConstantValue())
11318 return ReplaceInstUsesWith(PN, V);
11320 // If all PHI operands are the same operation, pull them through the PHI,
11321 // reducing code size.
11322 if (isa<Instruction>(PN.getIncomingValue(0)) &&
11323 isa<Instruction>(PN.getIncomingValue(1)) &&
11324 cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
11325 cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
11326 // FIXME: The hasOneUse check will fail for PHIs that use the value more
11327 // than once themselves.
11328 PN.getIncomingValue(0)->hasOneUse())
11329 if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
11332 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
11333 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
11334 // PHI)... break the cycle.
11335 if (PN.hasOneUse()) {
11336 Instruction *PHIUser = cast<Instruction>(PN.use_back());
11337 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
11338 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
11339 PotentiallyDeadPHIs.insert(&PN);
11340 if (DeadPHICycle(PU, PotentiallyDeadPHIs))
11341 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
11344 // If this phi has a single use, and if that use just computes a value for
11345 // the next iteration of a loop, delete the phi. This occurs with unused
11346 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
11347 // common case here is good because the only other things that catch this
11348 // are induction variable analysis (sometimes) and ADCE, which is only run
11350 if (PHIUser->hasOneUse() &&
11351 (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
11352 PHIUser->use_back() == &PN) {
11353 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
11357 // We sometimes end up with phi cycles that non-obviously end up being the
11358 // same value, for example:
11359 // z = some value; x = phi (y, z); y = phi (x, z)
11360 // where the phi nodes don't necessarily need to be in the same block. Do a
11361 // quick check to see if the PHI node only contains a single non-phi value, if
11362 // so, scan to see if the phi cycle is actually equal to that value.
11364 unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
11365 // Scan for the first non-phi operand.
11366 while (InValNo != NumOperandVals &&
11367 isa<PHINode>(PN.getIncomingValue(InValNo)))
11370 if (InValNo != NumOperandVals) {
11371 Value *NonPhiInVal = PN.getOperand(InValNo);
11373 // Scan the rest of the operands to see if there are any conflicts, if so
11374 // there is no need to recursively scan other phis.
11375 for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
11376 Value *OpVal = PN.getIncomingValue(InValNo);
11377 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
11381 // If we scanned over all operands, then we have one unique value plus
11382 // phi values. Scan PHI nodes to see if they all merge in each other or
11384 if (InValNo == NumOperandVals) {
11385 SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
11386 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
11387 return ReplaceInstUsesWith(PN, NonPhiInVal);
11392 // If there are multiple PHIs, sort their operands so that they all list
11393 // the blocks in the same order. This will help identical PHIs be eliminated
11394 // by other passes. Other passes shouldn't depend on this for correctness
11396 PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
11397 if (&PN != FirstPN)
11398 for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
11399 BasicBlock *BBA = PN.getIncomingBlock(i);
11400 BasicBlock *BBB = FirstPN->getIncomingBlock(i);
11402 Value *VA = PN.getIncomingValue(i);
11403 unsigned j = PN.getBasicBlockIndex(BBB);
11404 Value *VB = PN.getIncomingValue(j);
11405 PN.setIncomingBlock(i, BBB);
11406 PN.setIncomingValue(i, VB);
11407 PN.setIncomingBlock(j, BBA);
11408 PN.setIncomingValue(j, VA);
11409 // NOTE: Instcombine normally would want us to "return &PN" if we
11410 // modified any of the operands of an instruction. However, since we
11411 // aren't adding or removing uses (just rearranging them) we don't do
11412 // this in this case.
11416 // If this is an integer PHI and we know that it has an illegal type, see if
11417 // it is only used by trunc or trunc(lshr) operations. If so, we split the
11418 // PHI into the various pieces being extracted. This sort of thing is
11419 // introduced when SROA promotes an aggregate to a single large integer type.
11420 if (isa<IntegerType>(PN.getType()) && TD &&
11421 !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
11422 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
11428 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
11429 Value *PtrOp = GEP.getOperand(0);
11430 // Eliminate 'getelementptr %P, i32 0' and 'getelementptr %P', they are noops.
11431 if (GEP.getNumOperands() == 1)
11432 return ReplaceInstUsesWith(GEP, PtrOp);
11434 if (isa<UndefValue>(GEP.getOperand(0)))
11435 return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
11437 bool HasZeroPointerIndex = false;
11438 if (Constant *C = dyn_cast<Constant>(GEP.getOperand(1)))
11439 HasZeroPointerIndex = C->isNullValue();
11441 if (GEP.getNumOperands() == 2 && HasZeroPointerIndex)
11442 return ReplaceInstUsesWith(GEP, PtrOp);
11444 // Eliminate unneeded casts for indices.
11446 bool MadeChange = false;
11447 unsigned PtrSize = TD->getPointerSizeInBits();
11449 gep_type_iterator GTI = gep_type_begin(GEP);
11450 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
11451 I != E; ++I, ++GTI) {
11452 if (!isa<SequentialType>(*GTI)) continue;
11454 // If we are using a wider index than needed for this platform, shrink it
11455 // to what we need. If narrower, sign-extend it to what we need. This
11456 // explicit cast can make subsequent optimizations more obvious.
11457 unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
11458 if (OpBits == PtrSize)
11461 *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
11464 if (MadeChange) return &GEP;
11467 // Combine Indices - If the source pointer to this getelementptr instruction
11468 // is a getelementptr instruction, combine the indices of the two
11469 // getelementptr instructions into a single instruction.
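// An illustrative sketch (hypothetical types and names): when the outer GEP's
// first index is a constant zero, the two index lists can simply be
// concatenated, e.g.
//   %t = getelementptr {i32, [4 x i32]}* %P, i32 0, i32 1
//   %u = getelementptr [4 x i32]* %t, i32 0, i32 %i
// becomes
//   %u = getelementptr {i32, [4 x i32]}* %P, i32 0, i32 1, i32 %i
// The sequential-index case below instead adds the adjoining indices together.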
11471 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
11472 // Note that if our source is a gep chain itself, we wait for that
11473 // chain to be resolved before we perform this transformation. This
11474 // avoids creating a TON of code in some cases.
11476 if (GetElementPtrInst *SrcGEP =
11477 dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
11478 if (SrcGEP->getNumOperands() == 2)
11479 return 0; // Wait until our source is folded to completion.
11481 SmallVector<Value*, 8> Indices;
11483 // Find out whether the last index in the source GEP is a sequential idx.
11484 bool EndsWithSequential = false;
11485 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
11487 EndsWithSequential = !isa<StructType>(*I);
11489 // Can we combine the two pointer arithmetics offsets?
11490 if (EndsWithSequential) {
11491 // Replace: gep (gep %P, long B), long A, ...
11492 // With: T = long A+B; gep %P, T, ...
11495 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
11496 Value *GO1 = GEP.getOperand(1);
11497 if (SO1 == Constant::getNullValue(SO1->getType())) {
11499 } else if (GO1 == Constant::getNullValue(GO1->getType())) {
11502 // If they aren't the same type, then the input hasn't been processed
11503 // by the loop above yet (which canonicalizes sequential index types to
11504 // intptr_t). Just avoid transforming this until the input has been
11506 if (SO1->getType() != GO1->getType())
11508 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
11511 // Update the GEP in place if possible.
11512 if (Src->getNumOperands() == 2) {
11513 GEP.setOperand(0, Src->getOperand(0));
11514 GEP.setOperand(1, Sum);
11517 Indices.append(Src->op_begin()+1, Src->op_end()-1);
11518 Indices.push_back(Sum);
11519 Indices.append(GEP.op_begin()+2, GEP.op_end());
11520 } else if (isa<Constant>(*GEP.idx_begin()) &&
11521 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
11522 Src->getNumOperands() != 1) {
11523 // Otherwise we can do the fold if the first index of the GEP is a zero
11524 Indices.append(Src->op_begin()+1, Src->op_end());
11525 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
11528 if (!Indices.empty())
11529 return (cast<GEPOperator>(&GEP)->isInBounds() &&
11530 Src->isInBounds()) ?
11531 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
11532 Indices.end(), GEP.getName()) :
11533 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
11534 Indices.end(), GEP.getName());
11537 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
11538 if (Value *X = getBitCastOperand(PtrOp)) {
11539 assert(isa<PointerType>(X->getType()) && "Must be cast from pointer");
11541 // If the input bitcast is actually "bitcast(bitcast(x))", then we don't
11542 // want to change the gep until the bitcasts are eliminated.
11543 if (getBitCastOperand(X)) {
11544 Worklist.AddValue(PtrOp);
11548 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
11549 // into : GEP [10 x i8]* X, i32 0, ...
11551 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
11552 // into : GEP i8* X, ...
11554 // This occurs when the program declares an array extern like "int X[];"
11555 if (HasZeroPointerIndex) {
11556 const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
11557 const PointerType *XTy = cast<PointerType>(X->getType());
11558 if (const ArrayType *CATy =
11559 dyn_cast<ArrayType>(CPTy->getElementType())) {
11560 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
11561 if (CATy->getElementType() == XTy->getElementType()) {
11562 // -> GEP i8* X, ...
11563 SmallVector<Value*, 8> Indices(GEP.idx_begin()+1, GEP.idx_end());
11564 return cast<GEPOperator>(&GEP)->isInBounds() ?
11565 GetElementPtrInst::CreateInBounds(X, Indices.begin(), Indices.end(),
11567 GetElementPtrInst::Create(X, Indices.begin(), Indices.end(),
11571 if (const ArrayType *XATy = dyn_cast<ArrayType>(XTy->getElementType())){
11572 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
11573 if (CATy->getElementType() == XATy->getElementType()) {
11574 // -> GEP [10 x i8]* X, i32 0, ...
11575 // At this point, we know that the cast source type is a pointer
11576 // to an array of the same type as the destination pointer
11577 // array. Because the array type is never stepped over (there
11578 // is a leading zero) we can fold the cast into this GEP.
11579 GEP.setOperand(0, X);
11584 } else if (GEP.getNumOperands() == 2) {
11585 // Transform things like:
11586 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
11587 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
11588 const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
11589 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
11590 if (TD && isa<ArrayType>(SrcElTy) &&
11591 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
11592 TD->getTypeAllocSize(ResElTy)) {
11594 Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
11595 Idx[1] = GEP.getOperand(1);
11596 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
11597 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
11598 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
11599 // V and GEP are both pointer types --> BitCast
11600 return new BitCastInst(NewGEP, GEP.getType());
11603 // Transform things like:
11604 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
11605 // (where tmp = 8*tmp2) into:
11606 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
11608 if (TD && isa<ArrayType>(SrcElTy) && ResElTy == Type::getInt8Ty(*Context)) {
11609 uint64_t ArrayEltSize =
11610 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
11612 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
11613 // allow either a mul, shift, or constant here.
11615 ConstantInt *Scale = 0;
11616 if (ArrayEltSize == 1) {
11617 NewIdx = GEP.getOperand(1);
11618 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
11619 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
11620 NewIdx = ConstantInt::get(CI->getType(), 1);
11622 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
11623 if (Inst->getOpcode() == Instruction::Shl &&
11624 isa<ConstantInt>(Inst->getOperand(1))) {
11625 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
11626 uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
11627 Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
11629 NewIdx = Inst->getOperand(0);
11630 } else if (Inst->getOpcode() == Instruction::Mul &&
11631 isa<ConstantInt>(Inst->getOperand(1))) {
11632 Scale = cast<ConstantInt>(Inst->getOperand(1));
11633 NewIdx = Inst->getOperand(0);
11637 // If the index will be to exactly the right offset with the scale taken
11638 // out, perform the transformation. Note, we don't know whether Scale is
11639 // signed or not. We'll use the unsigned versions of the division/modulo
11640 // operations after making sure Scale doesn't have the sign bit set.
11641 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
11642 Scale->getZExtValue() % ArrayEltSize == 0) {
11643 Scale = ConstantInt::get(Scale->getType(),
11644 Scale->getZExtValue() / ArrayEltSize);
11645 if (Scale->getZExtValue() != 1) {
11646 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
11648 NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
11651 // Insert the new GEP instruction.
11653 Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
11655 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
11656 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
11657 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
11658 // The NewGEP must be pointer typed, so must the old one -> BitCast
11659 return new BitCastInst(NewGEP, GEP.getType());
11665 /// See if we can simplify:
11666 /// X = bitcast A* to B*
11667 /// Y = gep X, <...constant indices...>
11668 /// into a gep of the original struct. This is important for SROA and alias
11669 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
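// An illustrative sketch (hypothetical names; struct layout assumed, with the
// float field at byte offset 4):
//   %X = bitcast {i32, float}* %A to i8*
//   %Y = getelementptr i8* %X, i32 4
// can become
//   %F = getelementptr {i32, float}* %A, i32 0, i32 1
//   %Y = bitcast float* %F to i8*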
11670 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
11672 !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
11673 // Determine how much the GEP moves the pointer. We are guaranteed to get
11674 // a constant back from EmitGEPOffset.
11675 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP, *this));
11676 int64_t Offset = OffsetV->getSExtValue();
11678 // If this GEP instruction doesn't move the pointer, just replace the GEP
11679 // with a bitcast of the real input to the dest type.
11681 // If the bitcast is of an allocation, and the allocation will be
11682 // converted to match the type of the cast, don't touch this.
11683 if (isa<AllocaInst>(BCI->getOperand(0)) ||
11684 isMalloc(BCI->getOperand(0))) {
11685 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
11686 if (Instruction *I = visitBitCast(*BCI)) {
11689 BCI->getParent()->getInstList().insert(BCI, I);
11690 ReplaceInstUsesWith(*BCI, I);
11695 return new BitCastInst(BCI->getOperand(0), GEP.getType());
11698 // Otherwise, if the offset is non-zero, we need to find out if there is a
11699 // field at Offset in 'A's type. If so, we can pull the cast through the
11701 SmallVector<Value*, 8> NewIndices;
11703 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
11704 if (FindElementAtOffset(InTy, Offset, NewIndices, TD, Context)) {
11705 Value *NGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
11706 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
11707 NewIndices.end()) :
11708 Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
11711 if (NGEP->getType() == GEP.getType())
11712 return ReplaceInstUsesWith(GEP, NGEP);
11713 NGEP->takeName(&GEP);
11714 return new BitCastInst(NGEP, GEP.getType());
11722 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
11723 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
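// An illustrative sketch (hypothetical names): 'alloca i32, i32 4' becomes
//   %new     = alloca [4 x i32]
//   %new.sub = getelementptr inbounds [4 x i32]* %new, i32 0, i32 0
// and every use of the original alloca is replaced with %new.sub.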
11724 if (AI.isArrayAllocation()) { // Check C != 1
11725 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
11726 const Type *NewTy =
11727 ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
11728 assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
11729 AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
11730 New->setAlignment(AI.getAlignment());
11732 // Scan to the end of the allocation instructions, to skip over a block of
11733 // allocas if possible...also skip interleaved debug info
11735 BasicBlock::iterator It = New;
11736 while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
11738 // Now that It is pointing to the first non-allocation-inst in the block,
11739 // insert our getelementptr instruction...
11741 Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(*Context));
11745 Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
11746 New->getName()+".sub", It);
11748 // Now make everything use the getelementptr instead of the original
11750 return ReplaceInstUsesWith(AI, V);
11751 } else if (isa<UndefValue>(AI.getArraySize())) {
11752 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
11756 if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
11757 // If alloca'ing a zero byte object, replace the alloca with a null pointer.
11758 // Note that we only do this for alloca's, because malloc should allocate
11759 // and return a unique pointer, even for a zero byte allocation.
11760 if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
11761 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
11763 // If the alignment is 0 (unspecified), assign it the preferred alignment.
11764 if (AI.getAlignment() == 0)
11765 AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
11771 Instruction *InstCombiner::visitFree(Instruction &FI) {
11772 Value *Op = FI.getOperand(1);
11774 // free undef -> unreachable.
11775 if (isa<UndefValue>(Op)) {
11776 // Insert a new store to null because we cannot modify the CFG here.
11777 new StoreInst(ConstantInt::getTrue(*Context),
11778 UndefValue::get(Type::getInt1PtrTy(*Context)), &FI);
11779 return EraseInstFromFunction(FI);
11782 // If we have 'free null' delete the instruction. This can happen in stl code
11783 // when lots of inlining happens.
11784 if (isa<ConstantPointerNull>(Op))
11785 return EraseInstFromFunction(FI);
11787 // If we have a malloc call whose only use is a free call, delete both.
11788 if (isMalloc(Op)) {
11789 if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
11790 if (Op->hasOneUse() && CI->hasOneUse()) {
11791 EraseInstFromFunction(FI);
11792 EraseInstFromFunction(*CI);
11793 return EraseInstFromFunction(*cast<Instruction>(Op));
11796 // Op is a call to malloc
11797 if (Op->hasOneUse()) {
11798 EraseInstFromFunction(FI);
11799 return EraseInstFromFunction(*cast<Instruction>(Op));
11807 /// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
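// An illustrative sketch (hypothetical names): the source and destination
// pointee types must have the same size and both be pointers (or both be
// non-pointers), e.g.
//   %c = bitcast i32** %P to i8**
//   %v = load i8** %c
// becomes
//   %v1 = load i32** %P
//   %v  = bitcast i32* %v1 to i8*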
11808 static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
11809 const TargetData *TD) {
11810 User *CI = cast<User>(LI.getOperand(0));
11811 Value *CastOp = CI->getOperand(0);
11812 LLVMContext *Context = IC.getContext();
11814 const PointerType *DestTy = cast<PointerType>(CI->getType());
11815 const Type *DestPTy = DestTy->getElementType();
11816 if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
11818 // If the address spaces don't match, don't eliminate the cast.
11819 if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
11822 const Type *SrcPTy = SrcTy->getElementType();
11824 if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
11825 isa<VectorType>(DestPTy)) {
11826 // If the source is an array, the code below will not succeed. Check to
11827 // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
11829 if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
11830 if (Constant *CSrc = dyn_cast<Constant>(CastOp))
11831 if (ASrcTy->getNumElements() != 0) {
11833 Idxs[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
11835 CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
11836 SrcTy = cast<PointerType>(CastOp->getType());
11837 SrcPTy = SrcTy->getElementType();
11840 if (IC.getTargetData() &&
11841 (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
11842 isa<VectorType>(SrcPTy)) &&
11843 // Do not allow turning this into a load of an integer, which is then
11844 // casted to a pointer, this pessimizes pointer analysis a lot.
11845 (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
11846 IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
11847 IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
11849 // Okay, we are casting from one integer or pointer type to another of
11850 // the same size. Instead of casting the pointer before the load, cast
11851 // the result of the loaded value.
11853 IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
11854 // Now cast the result of the load.
11855 return new BitCastInst(NewLoad, LI.getType());
11862 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
11863 Value *Op = LI.getOperand(0);
11865 // Attempt to improve the alignment.
11867 unsigned KnownAlign =
11868 GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
11870 (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
11871 LI.getAlignment()))
11872 LI.setAlignment(KnownAlign);
11875 // load (cast X) --> cast (load X) iff safe.
11876 if (isa<CastInst>(Op))
11877 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
11880 // None of the following transforms are legal for volatile loads.
11881 if (LI.isVolatile()) return 0;
11883 // Do really simple store-to-load forwarding and load CSE, to catch cases
11884 // where there are several consecutive memory accesses to the same location,
11885 // separated by a few arithmetic operations.
11886 BasicBlock::iterator BBI = &LI;
11887 if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
11888 return ReplaceInstUsesWith(LI, AvailableVal);
11890 // load(gep null, ...) -> unreachable
11891 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
11892 const Value *GEPI0 = GEPI->getOperand(0);
11893 // TODO: Consider a target hook for valid address spaces for this xform.
11894 if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
11895 // Insert a new store to null instruction before the load to indicate
11896 // that this code is not reachable. We do this instead of inserting
11897 // an unreachable instruction directly because we cannot modify the
11899 new StoreInst(UndefValue::get(LI.getType()),
11900 Constant::getNullValue(Op->getType()), &LI);
11901 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11905 // load null/undef -> unreachable
11906 // TODO: Consider a target hook for valid address spaces for this xform.
11907 if (isa<UndefValue>(Op) ||
11908 (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
11909 // Insert a new store to null instruction before the load to indicate that
11910 // this code is not reachable. We do this instead of inserting an
11911 // unreachable instruction directly because we cannot modify the CFG.
11912 new StoreInst(UndefValue::get(LI.getType()),
11913 Constant::getNullValue(Op->getType()), &LI);
11914 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11917 // Instcombine load (constantexpr_cast global) -> cast (load global)
11918 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
11920 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
11923 if (Op->hasOneUse()) {
11924 // Change select and PHI nodes to select values instead of addresses: this
11925 // helps alias analysis out a lot, allows many other simplifications, and
11926 // exposes redundancy in the code.
11928 // Note that we cannot do the transformation unless we know that the
11929 // introduced loads cannot trap! Something like this is valid as long as
11930 // the condition is always false: load (select bool %C, int* null, int* %G),
11931 // but it would not be valid if we transformed it to load from null
11932 // unconditionally.
11934 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
11935 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
11936 if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
11937 isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
11938 Value *V1 = Builder->CreateLoad(SI->getOperand(1),
11939 SI->getOperand(1)->getName()+".val");
11940 Value *V2 = Builder->CreateLoad(SI->getOperand(2),
11941 SI->getOperand(2)->getName()+".val");
11942 return SelectInst::Create(SI->getCondition(), V1, V2);
11945 // load (select (cond, null, P)) -> load P
11946 if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
11947 if (C->isNullValue()) {
11948 LI.setOperand(0, SI->getOperand(2));
11952 // load (select (cond, P, null)) -> load P
11953 if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
11954 if (C->isNullValue()) {
11955 LI.setOperand(0, SI->getOperand(1));
11963 /// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
11964 /// when possible. This makes it generally easy to do alias analysis and/or
11965 /// SROA/mem2reg of the memory object.
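// An illustrative sketch (hypothetical names, assuming 32-bit pointers):
//   %c = bitcast i32* %P to i8**
//   store i8* %v, i8** %c
// becomes
//   %v.c = ptrtoint i8* %v to i32
//   store i32 %v.c, i32* %P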
11966 static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
11967 User *CI = cast<User>(SI.getOperand(1));
11968 Value *CastOp = CI->getOperand(0);
11970 const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
11971 const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
11972 if (SrcTy == 0) return 0;
11974 const Type *SrcPTy = SrcTy->getElementType();
11976 if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
11979 /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
11980 /// to its first element. This allows us to handle things like:
11981 /// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
11982 /// on 32-bit hosts.
11983 SmallVector<Value*, 4> NewGEPIndices;
11985 // If the source is an array, the code below will not succeed. Check to
11986 // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
11988 if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
11989 // Index through pointer.
11990 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(*IC.getContext()));
11991 NewGEPIndices.push_back(Zero);
11994 if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
11995 if (!STy->getNumElements()) /* Struct can be empty {} */
11997 NewGEPIndices.push_back(Zero);
11998 SrcPTy = STy->getElementType(0);
11999 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
12000 NewGEPIndices.push_back(Zero);
12001 SrcPTy = ATy->getElementType();
12007 SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
12010 if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
12013 // If the pointers point into different address spaces or if they point to
12014 // values with different sizes, we can't do the transformation.
12015 if (!IC.getTargetData() ||
12016 SrcTy->getAddressSpace() !=
12017 cast<PointerType>(CI->getType())->getAddressSpace() ||
12018 IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
12019 IC.getTargetData()->getTypeSizeInBits(DestPTy))
12022 // Okay, we are casting from one integer or pointer type to another of
12023 // the same size. Instead of casting the pointer before
12024 // the store, cast the value to be stored.
12026 Value *SIOp0 = SI.getOperand(0);
12027 Instruction::CastOps opcode = Instruction::BitCast;
12028 const Type* CastSrcTy = SIOp0->getType();
12029 const Type* CastDstTy = SrcPTy;
12030 if (isa<PointerType>(CastDstTy)) {
12031 if (CastSrcTy->isInteger())
12032 opcode = Instruction::IntToPtr;
12033 } else if (isa<IntegerType>(CastDstTy)) {
12034 if (isa<PointerType>(SIOp0->getType()))
12035 opcode = Instruction::PtrToInt;
12038 // SIOp0 is a pointer to aggregate and this is a store to the first field,
12039 // emit a GEP to index into its first field.
12040 if (!NewGEPIndices.empty())
12041 CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
12042 NewGEPIndices.end());
12044 NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
12045 SIOp0->getName()+".c");
12046 return new StoreInst(NewCast, CastOp);
12049 /// equivalentAddressValues - Test if A and B will obviously have the same
12050 /// value. This includes recognizing that %t0 and %t1 will have the same
12051 /// value in code like this:
12052 /// %t0 = getelementptr \@a, 0, 3
12053 /// store i32 0, i32* %t0
12054 /// %t1 = getelementptr \@a, 0, 3
12055 /// %t2 = load i32* %t1
12057 static bool equivalentAddressValues(Value *A, Value *B) {
12058 // Test if the values are trivially equivalent.
12059 if (A == B) return true;
12061 // Test if the values come from identical arithmetic instructions.
12062 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
12063 // it's only used to compare two uses within the same basic block, which
12064 // means that they'll always either have the same value or one of them
12065 // will have an undefined value.
12066 if (isa<BinaryOperator>(A) ||
12067 isa<CastInst>(A) ||
12069 isa<GetElementPtrInst>(A))
12070 if (Instruction *BI = dyn_cast<Instruction>(B))
12071 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
12074 // Otherwise they may not be equivalent.
12078 // If this instruction has two uses, one of which is a llvm.dbg.declare,
12079 // return the llvm.dbg.declare.
12080 DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
12081 if (!V->hasNUses(2))
12083 for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
12085 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
12087 if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
12088 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
12095 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
12096 Value *Val = SI.getOperand(0);
12097 Value *Ptr = SI.getOperand(1);
12099 // If the RHS is an alloca with a single use, zapify the store, making the
12101 // If the RHS is an alloca with two uses, the other one being a
12102 // llvm.dbg.declare, zapify the store and the declare, making the
12103 // alloca dead. We must do this to prevent declares from affecting
12105 if (!SI.isVolatile()) {
12106 if (Ptr->hasOneUse()) {
12107 if (isa<AllocaInst>(Ptr)) {
12108 EraseInstFromFunction(SI);
12112 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
12113 if (isa<AllocaInst>(GEP->getOperand(0))) {
12114 if (GEP->getOperand(0)->hasOneUse()) {
12115 EraseInstFromFunction(SI);
12119 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
12120 EraseInstFromFunction(*DI);
12121 EraseInstFromFunction(SI);
12128 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
12129 EraseInstFromFunction(*DI);
12130 EraseInstFromFunction(SI);
12136 // Attempt to improve the alignment.
12138 unsigned KnownAlign =
12139 GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
12141 (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
12142 SI.getAlignment()))
12143 SI.setAlignment(KnownAlign);
12146 // Do really simple DSE, to catch cases where there are several consecutive
12147 // stores to the same location, separated by a few arithmetic operations. This
12148 // situation often occurs with bitfield accesses.
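// An illustrative sketch (hypothetical names): in
//   store i32 %a, i32* %p
//   %t = add i32 %x, %y
//   store i32 %b, i32* %p
// the first store is dead and is deleted by the backwards scan below.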
12149 BasicBlock::iterator BBI = &SI;
12150 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
12153 // Don't count debug info directives, lest they affect codegen,
12154 // and we skip pointer-to-pointer bitcasts, which are NOPs.
12155 // It is necessary for correctness to skip those that feed into a
12156 // llvm.dbg.declare, as these are not present when debugging is off.
12157 if (isa<DbgInfoIntrinsic>(BBI) ||
12158 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
12163 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
12164 // Prev store isn't volatile, and stores to the same location?
12165 if (!PrevSI->isVolatile() &&equivalentAddressValues(PrevSI->getOperand(1),
12166 SI.getOperand(1))) {
12169 EraseInstFromFunction(*PrevSI);
12175 // If this is a load, we have to stop. However, if the loaded value is from
12176 // the pointer we're loading and is producing the pointer we're storing,
12177 // then *this* store is dead (X = load P; store X -> P).
12178 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
12179 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
12180 !SI.isVolatile()) {
12181 EraseInstFromFunction(SI);
12185 // Otherwise, this is a load from some other location. Stores before it
12186 // may not be dead.
12190 // Don't skip over loads or things that can modify memory.
12191 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
12196 if (SI.isVolatile()) return 0; // Don't hack volatile stores.
12198 // store X, null -> turns into 'unreachable' in SimplifyCFG
12199 if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
12200 if (!isa<UndefValue>(Val)) {
12201 SI.setOperand(0, UndefValue::get(Val->getType()));
12202 if (Instruction *U = dyn_cast<Instruction>(Val))
12203 Worklist.Add(U); // Dropped a use.
12206 return 0; // Do not modify these!
12209 // store undef, Ptr -> noop
12210 if (isa<UndefValue>(Val)) {
12211 EraseInstFromFunction(SI);
12216 // If the pointer destination is a cast, see if we can fold the cast into the
12218 if (isa<CastInst>(Ptr))
12219 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
12221 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
12223 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
12227 // If this store is the last instruction in the basic block (possibly
12228 // excepting debug info instructions and the pointer bitcasts that feed
12229 // into them), and if the block ends with an unconditional branch, try
12230 // to move it to the successor block.
12234 } while (isa<DbgInfoIntrinsic>(BBI) ||
12235 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
12236 if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
12237 if (BI->isUnconditional())
12238 if (SimplifyStoreAtEndOfBlock(SI))
12239 return 0; // xform done!
12244 /// SimplifyStoreAtEndOfBlock - Turn things like:
12245 /// if () { *P = v1; } else { *P = v2 }
12246 /// into a phi node with a store in the successor.
12248 /// Simplify things like:
12249 /// *P = v1; if () { *P = v2; }
12250 /// into a phi node with a store in the successor.
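/// An illustrative sketch (hypothetical names) of the result: the two stores
/// are removed and the common successor gets
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %P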
12252 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
12253 BasicBlock *StoreBB = SI.getParent();
12255 // Check to see if the successor block has exactly two incoming edges. If
12256 // so, see if the other predecessor contains a store to the same location.
12257 // if so, insert a PHI node (if needed) and move the stores down.
12258 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
12260 // Determine whether Dest has exactly two predecessors and, if so, compute
12261 // the other predecessor.
12262 pred_iterator PI = pred_begin(DestBB);
12263 BasicBlock *OtherBB = 0;
12264 if (*PI != StoreBB)
12267 if (PI == pred_end(DestBB))
12270 if (*PI != StoreBB) {
12275 if (++PI != pred_end(DestBB))
12278 // Bail out if all the relevant blocks aren't distinct (this can happen,
12279 // for example, if SI is in an infinite loop)
12280 if (StoreBB == DestBB || OtherBB == DestBB)
12283 // Verify that the other block ends in a branch and is not otherwise empty.
12284 BasicBlock::iterator BBI = OtherBB->getTerminator();
12285 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
12286 if (!OtherBr || BBI == OtherBB->begin())
12289 // If the other block ends in an unconditional branch, check for the 'if then
12290 // else' case: there is an instruction before the branch.
12291 StoreInst *OtherStore = 0;
12292 if (OtherBr->isUnconditional()) {
12294 // Skip over debugging info.
12295 while (isa<DbgInfoIntrinsic>(BBI) ||
12296 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
12297 if (BBI==OtherBB->begin())
12301 // If this isn't a store, isn't a store to the same location, or if the
12302 // alignments differ, bail out.
12303 OtherStore = dyn_cast<StoreInst>(BBI);
12304 if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
12305 OtherStore->getAlignment() != SI.getAlignment())
12308 // Otherwise, the other block ended with a conditional branch. If one of the
12309 // destinations is StoreBB, then we have the if/then case.
12310 if (OtherBr->getSuccessor(0) != StoreBB &&
12311 OtherBr->getSuccessor(1) != StoreBB)
12314 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
12315 // if/then triangle. See if there is a store to the same ptr as SI that
12316 // lives in OtherBB.
12318 // Check to see if we find the matching store.
12319 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
12320 if (OtherStore->getOperand(1) != SI.getOperand(1) ||
12321 OtherStore->getAlignment() != SI.getAlignment())
12325 // If we find something that may be using or overwriting the stored
12326 // value, or if we run out of instructions, we can't do the xform.
12327 if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
12328 BBI == OtherBB->begin())
12332 // In order to eliminate the store in OtherBr, we have to
12333 // make sure nothing reads or overwrites the stored value in
12335 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
12336 // FIXME: This should really be AA driven.
12337 if (I->mayReadFromMemory() || I->mayWriteToMemory())
12342 // Insert a PHI node now if we need it.
12343 Value *MergedVal = OtherStore->getOperand(0);
12344 if (MergedVal != SI.getOperand(0)) {
12345 PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
12346 PN->reserveOperandSpace(2);
12347 PN->addIncoming(SI.getOperand(0), SI.getParent());
12348 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
12349 MergedVal = InsertNewInstBefore(PN, DestBB->front());
12352 // Advance to a place where it is safe to insert the new store and
12354 BBI = DestBB->getFirstNonPHI();
12355 InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
12356 OtherStore->isVolatile(),
12357 SI.getAlignment()), *BBI);
12359 // Nuke the old stores.
12360 EraseInstFromFunction(SI);
12361 EraseInstFromFunction(*OtherStore);
12367 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
12368 // Change br (not X), label True, label False to: br X, label False, True
12370 BasicBlock *TrueDest;
12371 BasicBlock *FalseDest;
12372 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
12373 !isa<Constant>(X)) {
12374 // Swap Destinations and condition...
12375 BI.setCondition(X);
12376 BI.setSuccessor(0, FalseDest);
12377 BI.setSuccessor(1, TrueDest);
12381 // Canonicalize fcmp_one -> fcmp_oeq
12382 FCmpInst::Predicate FPred; Value *Y;
12383 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
12384 TrueDest, FalseDest)) &&
12385 BI.getCondition()->hasOneUse())
12386 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
12387 FPred == FCmpInst::FCMP_OGE) {
12388 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
12389 Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
12391 // Swap Destinations and condition.
12392 BI.setSuccessor(0, FalseDest);
12393 BI.setSuccessor(1, TrueDest);
12394 Worklist.Add(Cond);
12398 // Canonicalize icmp_ne -> icmp_eq
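// An illustrative sketch (hypothetical names):
//   br (icmp ne i32 %x, %y), label %T, label %F
// becomes
//   br (icmp eq i32 %x, %y), label %F, label %T
// i.e. the predicate is inverted and the successors are swapped.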
12399 ICmpInst::Predicate IPred;
12400 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
12401 TrueDest, FalseDest)) &&
12402 BI.getCondition()->hasOneUse())
12403 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
12404 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
12405 IPred == ICmpInst::ICMP_SGE) {
12406 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
12407 Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
12408 // Swap Destinations and condition.
12409 BI.setSuccessor(0, FalseDest);
12410 BI.setSuccessor(1, TrueDest);
12411 Worklist.Add(Cond);
12418 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
12419 Value *Cond = SI.getCondition();
12420 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
12421 if (I->getOpcode() == Instruction::Add)
12422 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
12423 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
12424 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
12426 ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
12428 SI.setOperand(0, I->getOperand(0));
12436 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
12437 Value *Agg = EV.getAggregateOperand();
12439 if (!EV.hasIndices())
12440 return ReplaceInstUsesWith(EV, Agg);
12442 if (Constant *C = dyn_cast<Constant>(Agg)) {
12443 if (isa<UndefValue>(C))
12444 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
12446 if (isa<ConstantAggregateZero>(C))
12447 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
12449 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
12450 // Extract the element indexed by the first index out of the constant
12451 Value *V = C->getOperand(*EV.idx_begin());
12452 if (EV.getNumIndices() > 1)
12453 // Extract the remaining indices out of the constant indexed by the
12455 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
12457 return ReplaceInstUsesWith(EV, V);
12459 return 0; // Can't handle other constants
12461 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
12462 // We're extracting from an insertvalue instruction, compare the indices
12463 const unsigned *exti, *exte, *insi, *inse;
12464 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
12465 exte = EV.idx_end(), inse = IV->idx_end();
12466 exti != exte && insi != inse;
12468 if (*insi != *exti)
12469 // The insert and extract both reference distinctly different elements.
12470 // This means the extract is not influenced by the insert, and we can
12471 // replace the aggregate operand of the extract with the aggregate
12472 // operand of the insert. i.e., replace
12473 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
12474 // %E = extractvalue { i32, { i32 } } %I, 0
12476 // %E = extractvalue { i32, { i32 } } %A, 0
12477 return ExtractValueInst::Create(IV->getAggregateOperand(),
12478 EV.idx_begin(), EV.idx_end());
12480 if (exti == exte && insi == inse)
12481 // Both iterators are at the end: Index lists are identical. Replace
12482 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
12483 // %C = extractvalue { i32, { i32 } } %B, 1, 0
12485 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
12486 if (exti == exte) {
12487 // The extract list is a prefix of the insert list. i.e. replace
12488 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
12489 // %E = extractvalue { i32, { i32 } } %I, 1
12491 // %X = extractvalue { i32, { i32 } } %A, 1
12492 // %E = insertvalue { i32 } %X, i32 42, 0
12493 // by switching the order of the insert and extract (though the
12494 // insertvalue should be left in, since it may have other uses).
12495 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
12496 EV.idx_begin(), EV.idx_end());
12497 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
12501 // The insert list is a prefix of the extract list
12502 // We can simply remove the common indices from the extract and make it
12503 // operate on the inserted value instead of the insertvalue result.
12505 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
12506 // %E = extractvalue { i32, { i32 } } %I, 1, 0
12508 // %E extractvalue { i32 } { i32 42 }, 0
12509 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
12512 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
12513 // We're extracting from an intrinsic, see if we're the only user, which
12514 // allows us to simplify multiple result intrinsics to simpler things that
12515 // just get one value..
12516 if (II->hasOneUse()) {
12517 // Check if we're grabbing the overflow bit or the result of a 'with
12518 // overflow' intrinsic. If it's the latter we can remove the intrinsic
12519 // and replace it with a traditional binary instruction.
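// An illustrative sketch (hypothetical names): if the intrinsic's only use is
// the value half of the pair,
//   %p = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue {i32, i1} %p, 0
// simplifies to
//   %v = add i32 %a, %b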
12520 switch (II->getIntrinsicID()) {
12521 case Intrinsic::uadd_with_overflow:
12522 case Intrinsic::sadd_with_overflow:
12523 if (*EV.idx_begin() == 0) { // Normal result.
12524 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
12525 II->replaceAllUsesWith(UndefValue::get(II->getType()));
12526 EraseInstFromFunction(*II);
12527 return BinaryOperator::CreateAdd(LHS, RHS);
12530 case Intrinsic::usub_with_overflow:
12531 case Intrinsic::ssub_with_overflow:
12532 if (*EV.idx_begin() == 0) { // Normal result.
12533 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
12534 II->replaceAllUsesWith(UndefValue::get(II->getType()));
12535 EraseInstFromFunction(*II);
12536 return BinaryOperator::CreateSub(LHS, RHS);
12539 case Intrinsic::umul_with_overflow:
12540 case Intrinsic::smul_with_overflow:
12541 if (*EV.idx_begin() == 0) { // Normal result.
12542 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
12543 II->replaceAllUsesWith(UndefValue::get(II->getType()));
12544 EraseInstFromFunction(*II);
12545 return BinaryOperator::CreateMul(LHS, RHS);
12553 // Can't simplify extracts from other values. Note that nested extracts are
12554 // already simplified implicitly by the above (extract ( extract (insert) )
12555 // will be translated into extract ( insert ( extract ) ) first and then just
12556 // the value inserted, if appropriate).
12560 /// CheapToScalarize - Return true if the value is cheaper to scalarize than it
12561 /// is to leave as a vector operation.
12562 static bool CheapToScalarize(Value *V, bool isConstant) {
12563 if (isa<ConstantAggregateZero>(V))
12565 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
12566 if (isConstant) return true;
12567 // If all elts are the same, we can extract.
12568 Constant *Op0 = C->getOperand(0);
12569 for (unsigned i = 1; i < C->getNumOperands(); ++i)
12570 if (C->getOperand(i) != Op0)
12574 Instruction *I = dyn_cast<Instruction>(V);
12575 if (!I) return false;
12577 // Insert element gets simplified to the inserted element or is deleted if
12578 // this is a constant idx extractelement and it's a constant idx insertelement.
12579 if (I->getOpcode() == Instruction::InsertElement && isConstant &&
12580 isa<ConstantInt>(I->getOperand(2)))
12582 if (I->getOpcode() == Instruction::Load && I->hasOneUse())
12584 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
12585 if (BO->hasOneUse() &&
12586 (CheapToScalarize(BO->getOperand(0), isConstant) ||
12587 CheapToScalarize(BO->getOperand(1), isConstant)))
12589 if (CmpInst *CI = dyn_cast<CmpInst>(I))
12590 if (CI->hasOneUse() &&
12591 (CheapToScalarize(CI->getOperand(0), isConstant) ||
12592 CheapToScalarize(CI->getOperand(1), isConstant)))
12598 /// Read and decode a shufflevector mask.
12600 /// It turns undef elements into values that are larger than the number of
12601 /// elements in the input.
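// An illustrative sketch (hypothetical mask): for a shuffle producing
// <4 x i32> with mask <i32 0, i32 5, i32 undef, i32 2>, this returns
// {0, 5, 8, 2}; the 8 (== 2*NElts) marks the undef element.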
12602 static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
12603 unsigned NElts = SVI->getType()->getNumElements();
12604 if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
12605 return std::vector<unsigned>(NElts, 0);
12606 if (isa<UndefValue>(SVI->getOperand(2)))
12607 return std::vector<unsigned>(NElts, 2*NElts);
12609 std::vector<unsigned> Result;
12610 const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
12611 for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
12612 if (isa<UndefValue>(*i))
12613 Result.push_back(NElts*2); // undef -> out-of-range marker (2*NElts)
12615 Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
12619 /// FindScalarElement - Given a vector and an element number, see if the scalar
12620 /// value is already around as a register, for example if it were inserted then
12621 /// extracted from the vector.
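/// Illustrative example (hypothetical IR): given
///   %v = insertelement <4 x float> %w, float %x, i32 2
/// FindScalarElement(%v, 2) returns %x, while FindScalarElement(%v, 0) recurses
/// into %w looking for lane 0.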
12622 static Value *FindScalarElement(Value *V, unsigned EltNo,
12623 LLVMContext *Context) {
12624 assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
12625 const VectorType *PTy = cast<VectorType>(V->getType());
12626 unsigned Width = PTy->getNumElements();
12627 if (EltNo >= Width) // Out of range access.
12628 return UndefValue::get(PTy->getElementType());
12630 if (isa<UndefValue>(V))
12631 return UndefValue::get(PTy->getElementType());
12632 else if (isa<ConstantAggregateZero>(V))
12633 return Constant::getNullValue(PTy->getElementType());
12634 else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
12635 return CP->getOperand(EltNo);
12636 else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
12637 // If this is an insert to a variable element, we don't know what it is.
12638 if (!isa<ConstantInt>(III->getOperand(2)))
12640 unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
12642 // If this is an insert to the element we are looking for, return the inserted value.
12644 if (EltNo == IIElt)
12645 return III->getOperand(1);
12647 // Otherwise, the insertelement doesn't modify the value; recurse on its vector operand.
12649 return FindScalarElement(III->getOperand(0), EltNo, Context);
12650 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
12651 unsigned LHSWidth =
12652 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
12653 unsigned InEl = getShuffleMask(SVI)[EltNo];
12654 if (InEl < LHSWidth)
12655 return FindScalarElement(SVI->getOperand(0), InEl, Context);
12656 else if (InEl < LHSWidth*2)
12657 return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth, Context);
12659 return UndefValue::get(PTy->getElementType());
12662 // Otherwise, we don't know.
12666 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
12667 // If vector val is undef, replace extract with scalar undef.
12668 if (isa<UndefValue>(EI.getOperand(0)))
12669 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
12671 // If vector val is constant 0, replace extract with scalar 0.
12672 if (isa<ConstantAggregateZero>(EI.getOperand(0)))
12673 return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
12675 if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
12676 // If vector val is constant with all elements the same, replace EI with
12677 // that element. When the elements are not identical, we cannot replace yet
12678 // (we do that below, but only when the index is constant).
12679 Constant *op0 = C->getOperand(0);
12680 for (unsigned i = 1; i != C->getNumOperands(); ++i)
12681 if (C->getOperand(i) != op0) {
12686 return ReplaceInstUsesWith(EI, op0);
12689 // If extracting a specified index from the vector, see if we can recursively
12690 // find a previously computed scalar that was inserted into the vector.
12691 if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
12692 unsigned IndexVal = IdxC->getZExtValue();
12693 unsigned VectorWidth = EI.getVectorOperandType()->getNumElements();
12695 // If this is extracting an invalid index, turn this into undef, to avoid
12696 // crashing the code below.
12697 if (IndexVal >= VectorWidth)
12698 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
12700 // This instruction only demands the single element from the input vector.
12701 // If the input vector has a single use, simplify it based on this single use.
12703 if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
12704 APInt UndefElts(VectorWidth, 0);
12705 APInt DemandedMask(VectorWidth, 1 << IndexVal);
12706 if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
12707 DemandedMask, UndefElts)) {
12708 EI.setOperand(0, V);
12713 if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal, Context))
12714 return ReplaceInstUsesWith(EI, Elt);
12716 // If this extractelement is directly using a bitcast from a vector of
12717 // the same number of elements, see if we can find the source element from
12718 // it. In this case, we will end up needing to bitcast the scalars.
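// Illustrative sketch (hypothetical IR): given
//   %f = bitcast <4 x i32> %v to <4 x float>
// if lane 1 of %v is already available as a scalar %s, then
// 'extractelement <4 x float> %f, i32 1' can become 'bitcast i32 %s to float'.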
12719 if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
12720 if (const VectorType *VT =
12721 dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
12722 if (VT->getNumElements() == VectorWidth)
12723 if (Value *Elt = FindScalarElement(BCI->getOperand(0),
12724 IndexVal, Context))
12725 return new BitCastInst(Elt, EI.getType());
12729 if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
12730 // Push extractelement into predecessor operation if legal and
12731 // profitable to do so
12732 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
12733 if (I->hasOneUse() &&
12734 CheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
12736 Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
12737 EI.getName()+".lhs");
12739 Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
12740 EI.getName()+".rhs");
12741 return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
12743 } else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
12744 // Extracting the inserted element?
12745 if (IE->getOperand(2) == EI.getOperand(1))
12746 return ReplaceInstUsesWith(EI, IE->getOperand(1));
12747 // If the inserted and extracted elements are constants, they must not
12748 // be the same value; extract from the pre-inserted value instead.
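// Illustrative example (hypothetical IR):
//   extractelement (insertelement <4 x i32> %v, i32 %s, i32 3), i32 1
// reads a lane the insert cannot have written, so it becomes
//   extractelement <4 x i32> %v, i32 1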
12749 if (isa<Constant>(IE->getOperand(2)) && isa<Constant>(EI.getOperand(1))) {
12750 Worklist.AddValue(EI.getOperand(0));
12751 EI.setOperand(0, IE->getOperand(0));
12754 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
12755 // If this is extracting an element from a shufflevector, figure out where
12756 // it came from and extract from the appropriate input element instead.
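// Illustrative example (hypothetical IR): extracting lane 2 of
//   shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// selects mask element 6, i.e. lane 2 of the second operand, so it becomes
//   extractelement <4 x i32> %b, i32 2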
12757 if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
12758 unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
12760 unsigned LHSWidth =
12761 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
12763 if (SrcIdx < LHSWidth)
12764 Src = SVI->getOperand(0);
12765 else if (SrcIdx < LHSWidth*2) {
12766 SrcIdx -= LHSWidth;
12767 Src = SVI->getOperand(1);
12769 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
12771 return ExtractElementInst::Create(Src,
12772 ConstantInt::get(Type::getInt32Ty(*Context), SrcIdx,
12776 // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
12781 /// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
12782 /// elements from either LHS or RHS, return the shuffle mask and true.
12783 /// Otherwise, return false.
12784 static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
12785 std::vector<Constant*> &Mask,
12786 LLVMContext *Context) {
12787 assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
12788 "Invalid CollectSingleShuffleElements");
12789 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
12791 if (isa<UndefValue>(V)) {
12792 Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
12794 } else if (V == LHS) {
12795 for (unsigned i = 0; i != NumElts; ++i)
12796 Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
12798 } else if (V == RHS) {
12799 for (unsigned i = 0; i != NumElts; ++i)
12800 Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i+NumElts));
12802 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12803 // If this is an insert of an extract from some other vector, include it.
12804 Value *VecOp = IEI->getOperand(0);
12805 Value *ScalarOp = IEI->getOperand(1);
12806 Value *IdxOp = IEI->getOperand(2);
12808 if (!isa<ConstantInt>(IdxOp))
12810 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12812 if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
12813 // Okay, we can handle this if the vector we are inserting into is
12814 // transitively ok.
12815 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
12816 // If so, update the mask to reflect the inserted undef.
12817 Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(*Context));
12820 } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
12821 if (isa<ConstantInt>(EI->getOperand(1)) &&
12822 EI->getOperand(0)->getType() == V->getType()) {
12823 unsigned ExtractedIdx =
12824 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12826 // This must be extracting from either LHS or RHS.
12827 if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
12828 // Okay, we can handle this if the vector we are inserting into is
12829 // transitively ok.
12830 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
12831 // If so, update the mask to reflect the inserted value.
12832 if (EI->getOperand(0) == LHS) {
12833 Mask[InsertedIdx % NumElts] =
12834 ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
12836 assert(EI->getOperand(0) == RHS);
12837 Mask[InsertedIdx % NumElts] =
12838 ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx+NumElts);
12847 // TODO: Handle shufflevector here!
12852 /// CollectShuffleElements - We are building a shuffle of V, using RHS as the
12853 /// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
12854 /// that computes V and the LHS value of the shuffle.
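/// Illustrative sketch (hypothetical IR): for the chain
///   %e  = extractelement <2 x i32> %b, i32 0
///   %t0 = insertelement <2 x i32> undef, i32 %e, i32 0
/// with RHS initially null, RHS becomes %b and the mask entry for lane 0 is
/// NumElts+0 == 2, i.e. that lane is taken from the shuffle's second operand.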
12855 static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
12856 Value *&RHS, LLVMContext *Context) {
12857 assert(isa<VectorType>(V->getType()) &&
12858 (RHS == 0 || V->getType() == RHS->getType()) &&
12859 "Invalid shuffle!");
12860 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
12862 if (isa<UndefValue>(V)) {
12863 Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
12865 } else if (isa<ConstantAggregateZero>(V)) {
12866 Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(*Context), 0));
12868 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12869 // If this is an insert of an extract from some other vector, include it.
12870 Value *VecOp = IEI->getOperand(0);
12871 Value *ScalarOp = IEI->getOperand(1);
12872 Value *IdxOp = IEI->getOperand(2);
12874 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12875 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12876 EI->getOperand(0)->getType() == V->getType()) {
12877 unsigned ExtractedIdx =
12878 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12879 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12881 // Either the vector being extracted from or the vector being inserted into
12882 // must be RHS, otherwise we'd end up with a shuffle of three inputs.
12883 if (EI->getOperand(0) == RHS || RHS == 0) {
12884 RHS = EI->getOperand(0);
12885 Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
12886 Mask[InsertedIdx % NumElts] =
12887 ConstantInt::get(Type::getInt32Ty(*Context), NumElts+ExtractedIdx);
12891 if (VecOp == RHS) {
12892 Value *V = CollectShuffleElements(EI->getOperand(0), Mask,
12894 // Everything but the extracted element is replaced with the RHS.
12895 for (unsigned i = 0; i != NumElts; ++i) {
12896 if (i != InsertedIdx)
12897 Mask[i] = ConstantInt::get(Type::getInt32Ty(*Context), NumElts+i);
12902 // If this insertelement is a chain that comes from exactly these two
12903 // vectors, return the vector and the effective shuffle.
12904 if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask,
12906 return EI->getOperand(0);
12911 // TODO: Handle shufflevector here!
12913 // Otherwise, can't do anything fancy. Return an identity vector.
12914 for (unsigned i = 0; i != NumElts; ++i)
12915 Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
12919 Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
12920 Value *VecOp = IE.getOperand(0);
12921 Value *ScalarOp = IE.getOperand(1);
12922 Value *IdxOp = IE.getOperand(2);
12924 // If we are inserting an undef value, or inserting into an undefined position, remove this.
12925 if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
12926 ReplaceInstUsesWith(IE, VecOp);
12928 // If the inserted element was extracted from some other vector, and if the
12929 // indexes are constant, try to turn this into a shufflevector operation.
12930 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12931 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12932 EI->getOperand(0)->getType() == IE.getType()) {
12933 unsigned NumVectorElts = IE.getType()->getNumElements();
12934 unsigned ExtractedIdx =
12935 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12936 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12938 if (ExtractedIdx >= NumVectorElts) // Out of range extract.
12939 return ReplaceInstUsesWith(IE, VecOp);
12941 if (InsertedIdx >= NumVectorElts) // Out of range insert.
12942 return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
12944 // If we are extracting a value from a vector, then inserting it right
12945 // back into the same place, just use the input vector.
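// Illustrative example (hypothetical IR):
//   %e = extractelement <4 x i32> %v, i32 1
//   %r = insertelement <4 x i32> %v, i32 %e, i32 1
// merely reproduces %v, so %r is replaced by %v.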
12946 if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
12947 return ReplaceInstUsesWith(IE, VecOp);
12949 // If this insertelement isn't used by some other insertelement, turn it
12950 // (and any insertelements it points to) into one big shuffle.
12951 if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
12952 std::vector<Constant*> Mask;
12954 Value *LHS = CollectShuffleElements(&IE, Mask, RHS, Context);
12955 if (RHS == 0) RHS = UndefValue::get(LHS->getType());
12956 // We now have a shuffle of LHS, RHS, Mask.
12957 return new ShuffleVectorInst(LHS, RHS,
12958 ConstantVector::get(Mask));
12963 unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
12964 APInt UndefElts(VWidth, 0);
12965 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12966 if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
12973 Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
12974 Value *LHS = SVI.getOperand(0);
12975 Value *RHS = SVI.getOperand(1);
12976 std::vector<unsigned> Mask = getShuffleMask(&SVI);
12978 bool MadeChange = false;
12980 // Undefined shuffle mask -> undefined value.
12981 if (isa<UndefValue>(SVI.getOperand(2)))
12982 return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
12984 unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
12986 if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
12989 APInt UndefElts(VWidth, 0);
12990 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12991 if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
12992 LHS = SVI.getOperand(0);
12993 RHS = SVI.getOperand(1);
12997 // Canonicalize shuffle(x, x, mask) -> shuffle(x, undef, mask')
12998 // Canonicalize shuffle(undef, x, mask) -> shuffle(x, undef, mask').
12999 if (LHS == RHS || isa<UndefValue>(LHS)) {
13000 if (isa<UndefValue>(LHS) && LHS == RHS) {
13001 // shuffle(undef,undef,mask) -> undef.
13002 return ReplaceInstUsesWith(SVI, LHS);
13005 // Remap any references to RHS to use LHS.
13006 std::vector<Constant*> Elts;
13007 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
13008 if (Mask[i] >= 2*e)
13009 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
13011 if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
13012 (Mask[i] < e && isa<UndefValue>(LHS))) {
13013 Mask[i] = 2*e; // Turn into undef.
13014 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
13016 Mask[i] = Mask[i] % e; // Force to LHS.
13017 Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Mask[i]));
13021 SVI.setOperand(0, SVI.getOperand(1));
13022 SVI.setOperand(1, UndefValue::get(RHS->getType()));
13023 SVI.setOperand(2, ConstantVector::get(Elts));
13024 LHS = SVI.getOperand(0);
13025 RHS = SVI.getOperand(1);
13029 // Analyze the shuffle: is the LHS or the RHS an identity shuffle?
13030 bool isLHSID = true, isRHSID = true;
13032 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
13033 if (Mask[i] >= e*2) continue; // Ignore undef values.
13034 // Is this an identity shuffle of the LHS value?
13035 isLHSID &= (Mask[i] == i);
13037 // Is this an identity shuffle of the RHS value?
13038 isRHSID &= (Mask[i]-e == i);
13041 // Eliminate identity shuffles.
13042 if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
13043 if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
13045 // If the LHS is a shufflevector itself, see if we can combine it with this
13046 // one without producing an unusual shuffle. Here we are really conservative:
13047 // we are absolutely afraid of producing a shuffle mask not in the input
13048 // program, because the code gen may not be smart enough to turn a merged
13049 // shuffle into two specific shuffles: it may produce worse code. As such,
13050 // we only merge two shuffles if the result is one of the two input shuffle
13051 // masks. In this case, merging the shuffles just removes one instruction,
13052 // which we know is safe. This is good for things like turning:
13053 // (splat(splat)) -> splat.
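// Illustrative sketch (hypothetical IR): if %s1 splats lane 0 of %x and
//   %s2 = shufflevector <4 x i32> %s1, <4 x i32> undef, <4 x i32> zeroinitializer
// then the merged mask still selects only lane 0 of %x, so %s2 can be rewritten
// as a single splat-shuffle reading directly from %x.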
13054 if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
13055 if (isa<UndefValue>(RHS)) {
13056 std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);
13058 if (LHSMask.size() == Mask.size()) {
13059 std::vector<unsigned> NewMask;
13060 for (unsigned i = 0, e = Mask.size(); i != e; ++i)
13062 NewMask.push_back(2*e);
13064 NewMask.push_back(LHSMask[Mask[i]]);
13066 // If the result mask is equal to the src shuffle or this
13067 // shuffle mask, do the replacement.
13068 if (NewMask == LHSMask || NewMask == Mask) {
13069 unsigned LHSInNElts =
13070 cast<VectorType>(LHSSVI->getOperand(0)->getType())->
13072 std::vector<Constant*> Elts;
13073 for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
13074 if (NewMask[i] >= LHSInNElts*2) {
13075 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
13077 Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
13081 return new ShuffleVectorInst(LHSSVI->getOperand(0),
13082 LHSSVI->getOperand(1),
13083 ConstantVector::get(Elts));
13089 return MadeChange ? &SVI : 0;
13095 /// TryToSinkInstruction - Try to move the specified instruction from its
13096 /// current block into the beginning of DestBlock, which can only happen if it's
13097 /// safe to move the instruction past all of the instructions between it and the
13098 /// end of its block.
13099 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
13100 assert(I->hasOneUse() && "Invariants didn't hold!");
13102 // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
13103 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
13106 // Do not sink alloca instructions out of the entry block.
13107 if (isa<AllocaInst>(I) && I->getParent() ==
13108 &DestBlock->getParent()->getEntryBlock())
13111 // We can only sink load instructions if there is nothing between the load and
13112 // the end of block that could change the value.
13113 if (I->mayReadFromMemory()) {
13114 for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
13116 if (Scan->mayWriteToMemory())
13120 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();
13122 CopyPrecedingStopPoint(I, InsertPos);
13123 I->moveBefore(InsertPos);
13129 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
13130 /// all reachable code to the worklist.
13132 /// This has a couple of tricks to make the code faster and more powerful. In
13133 /// particular, we constant fold and DCE instructions as we go, to avoid adding
13134 /// them to the worklist (this significantly speeds up instcombine on code where
13135 /// many instructions are dead or constant). Additionally, if we find a branch
13136 /// whose condition is a known constant, we only visit the reachable successors.
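/// Illustrative example (hypothetical IR): given 'br i1 true, label %T, label %F',
/// only %T is pushed onto the block worklist here, so an otherwise-unreachable
/// %F is never scanned.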
13138 static bool AddReachableCodeToWorklist(BasicBlock *BB,
13139 SmallPtrSet<BasicBlock*, 64> &Visited,
13141 const TargetData *TD) {
13142 bool MadeIRChange = false;
13143 SmallVector<BasicBlock*, 256> Worklist;
13144 Worklist.push_back(BB);
13146 std::vector<Instruction*> InstrsForInstCombineWorklist;
13147 InstrsForInstCombineWorklist.reserve(128);
13149 SmallPtrSet<ConstantExpr*, 64> FoldedConstants;
13151 while (!Worklist.empty()) {
13152 BB = Worklist.back();
13153 Worklist.pop_back();
13155 // We have now visited this block! If we've already been here, ignore it.
13156 if (!Visited.insert(BB)) continue;
13158 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
13159 Instruction *Inst = BBI++;
13161 // DCE instruction if trivially dead.
13162 if (isInstructionTriviallyDead(Inst)) {
13164 DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
13165 Inst->eraseFromParent();
13169 // ConstantProp instruction if trivially constant.
13170 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
13171 if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
13172 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
13174 Inst->replaceAllUsesWith(C);
13176 Inst->eraseFromParent();
13183 // See if we can constant fold its operands.
13184 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
13186 ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
13187 if (CE == 0) continue;
13189 // If we already folded this constant, don't try again.
13190 if (!FoldedConstants.insert(CE))
13193 Constant *NewC = ConstantFoldConstantExpression(CE, TD);
13194 if (NewC && NewC != CE) {
13196 MadeIRChange = true;
13202 InstrsForInstCombineWorklist.push_back(Inst);
13205 // Recursively visit successors. If this is a branch or switch on a
13206 // constant, only visit the reachable successor.
13207 TerminatorInst *TI = BB->getTerminator();
13208 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
13209 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
13210 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
13211 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
13212 Worklist.push_back(ReachableBB);
13215 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
13216 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
13217 // See if this is an explicit destination.
13218 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
13219 if (SI->getCaseValue(i) == Cond) {
13220 BasicBlock *ReachableBB = SI->getSuccessor(i);
13221 Worklist.push_back(ReachableBB);
13225 // Otherwise it is the default destination.
13226 Worklist.push_back(SI->getSuccessor(0));
13231 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
13232 Worklist.push_back(TI->getSuccessor(i));
13235 // Once we've found all of the instructions to add to instcombine's worklist,
13236 // add them in reverse order. This way instcombine will visit from the top
13237 // of the function down. This jibes well with the way that it adds all uses
13238 // of instructions to the worklist after doing a transformation, thus avoiding
13239 // some N^2 behavior in pathological cases.
13240 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
13241 InstrsForInstCombineWorklist.size());
13243 return MadeIRChange;
13246 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
13247 MadeIRChange = false;
13249 DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
13250 << F.getNameStr() << "\n");
13253 // Do a depth-first traversal of the function, populate the worklist with
13254 // the reachable instructions. Ignore blocks that are not reachable. Keep
13255 // track of which blocks we visit.
13256 SmallPtrSet<BasicBlock*, 64> Visited;
13257 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
13259 // Do a quick scan over the function. If we find any blocks that are
13260 // unreachable, remove any instructions inside of them. This prevents
13261 // the instcombine code from having to deal with some bad special cases.
13262 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
13263 if (!Visited.count(BB)) {
13264 Instruction *Term = BB->getTerminator();
13265 while (Term != BB->begin()) { // Remove instrs bottom-up
13266 BasicBlock::iterator I = Term; --I;
13268 DEBUG(errs() << "IC: DCE: " << *I << '\n');
13269 // A debug intrinsic shouldn't force another iteration if we weren't
13270 // going to do one without it.
13271 if (!isa<DbgInfoIntrinsic>(I)) {
13273 MadeIRChange = true;
13276 // If I is not of void type, then replaceAllUsesWith undef.
13277 // This allows value handles and custom metadata to adjust themselves.
13278 if (!I->getType()->isVoidTy())
13279 I->replaceAllUsesWith(UndefValue::get(I->getType()));
13280 I->eraseFromParent();
13285 while (!Worklist.isEmpty()) {
13286 Instruction *I = Worklist.RemoveOne();
13287 if (I == 0) continue; // skip null values.
13289 // Check to see if we can DCE the instruction.
13290 if (isInstructionTriviallyDead(I)) {
13291 DEBUG(errs() << "IC: DCE: " << *I << '\n');
13292 EraseInstFromFunction(*I);
13294 MadeIRChange = true;
13298 // Instruction isn't dead, see if we can constant propagate it.
13299 if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
13300 if (Constant *C = ConstantFoldInstruction(I, TD)) {
13301 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
13303 // Add operands to the worklist.
13304 ReplaceInstUsesWith(*I, C);
13306 EraseInstFromFunction(*I);
13307 MadeIRChange = true;
13311 // See if we can trivially sink this instruction to a successor basic block.
13312 if (I->hasOneUse()) {
13313 BasicBlock *BB = I->getParent();
13314 Instruction *UserInst = cast<Instruction>(I->use_back());
13315 BasicBlock *UserParent;
13317 // Get the block the use occurs in.
13318 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
13319 UserParent = PN->getIncomingBlock(I->use_begin().getUse());
13321 UserParent = UserInst->getParent();
13323 if (UserParent != BB) {
13324 bool UserIsSuccessor = false;
13325 // See if the user is one of our successors.
13326 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
13327 if (*SI == UserParent) {
13328 UserIsSuccessor = true;
13332 // If the user is one of our immediate successors, and if that successor
13333 // only has us as a predecessor (we'd have to split the critical edge
13334 // otherwise), we can keep going.
13335 if (UserIsSuccessor && UserParent->getSinglePredecessor())
13336 // Okay, the CFG is simple enough, try to sink this instruction.
13337 MadeIRChange |= TryToSinkInstruction(I, UserParent);
13341 // Now that we have an instruction, try combining it to simplify it.
13342 Builder->SetInsertPoint(I->getParent(), I);
13347 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
13348 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');
13350 if (Instruction *Result = visit(*I)) {
13352 // Should we replace the old instruction with a new one?
13354 DEBUG(errs() << "IC: Old = " << *I << '\n'
13355 << " New = " << *Result << '\n');
13357 // Everything uses the new instruction now.
13358 I->replaceAllUsesWith(Result);
13360 // Push the new instruction and any users onto the worklist.
13361 Worklist.Add(Result);
13362 Worklist.AddUsersToWorkList(*Result);
13364 // Move the name to the new instruction first.
13365 Result->takeName(I);
13367 // Insert the new instruction into the basic block...
13368 BasicBlock *InstParent = I->getParent();
13369 BasicBlock::iterator InsertPos = I;
13371 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert into the
13372 while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
13375 InstParent->getInstList().insert(InsertPos, Result);
13377 EraseInstFromFunction(*I);
13380 DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
13381 << " New = " << *I << '\n');
13384 // If the instruction was modified, it's possible that it is now dead.
13385 // If so, remove it.
13386 if (isInstructionTriviallyDead(I)) {
13387 EraseInstFromFunction(*I);
13390 Worklist.AddUsersToWorkList(*I);
13393 MadeIRChange = true;
13398 return MadeIRChange;
13402 bool InstCombiner::runOnFunction(Function &F) {
13403 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
13404 Context = &F.getContext();
13405 TD = getAnalysisIfAvailable<TargetData>();
13408 /// Builder - This is an IRBuilder that automatically inserts new
13409 /// instructions into the worklist when they are created.
13410 IRBuilder<true, TargetFolder, InstCombineIRInserter>
13411 TheBuilder(F.getContext(), TargetFolder(TD),
13412 InstCombineIRInserter(Worklist));
13413 Builder = &TheBuilder;
13415 bool EverMadeChange = false;
13417 // Iterate while there is work to do.
13418 unsigned Iteration = 0;
13419 while (DoOneIteration(F, Iteration++))
13420 EverMadeChange = true;
13423 return EverMadeChange;
13426 FunctionPass *llvm::createInstructionCombiningPass() {
13427 return new InstCombiner();