//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MallocHelper.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <vector>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");

/// InstCombineWorklist - This is the worklist management logic for
/// InstCombine.
namespace {
class InstCombineWorklist {
  SmallVector<Instruction*, 256> Worklist;
  DenseMap<Instruction*, unsigned> WorklistMap;

  void operator=(const InstCombineWorklist&RHS);   // DO NOT IMPLEMENT
  InstCombineWorklist(const InstCombineWorklist&); // DO NOT IMPLEMENT
public:
  InstCombineWorklist() {}

  bool isEmpty() const { return Worklist.empty(); }

  /// Add - Add the specified instruction to the worklist if it isn't already
  /// in it.
  void Add(Instruction *I) {
    if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
      DEBUG(errs() << "IC: ADD: " << *I << '\n');
      Worklist.push_back(I);
    }
  }

  void AddValue(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      Add(I);
  }

  // Remove - remove I from the worklist if it exists.
  void Remove(Instruction *I) {
    DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
    if (It == WorklistMap.end()) return; // Not in worklist.

    // Don't bother moving everything down, just null out the slot.
    Worklist[It->second] = 0;

    WorklistMap.erase(It);
  }

  Instruction *RemoveOne() {
    Instruction *I = Worklist.back();
    Worklist.pop_back();
    WorklistMap.erase(I);
    return I;
  }

  /// AddUsersToWorkList - When an instruction is simplified, add all users of
  /// the instruction to the work lists because they might get more simplified
  /// now.
  void AddUsersToWorkList(Instruction &I) {
    for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
         UI != UE; ++UI)
      Add(cast<Instruction>(*UI));
  }

  /// Zap - check that the worklist is empty and nuke the backing store for
  /// the map if it is large.
  void Zap() {
    assert(WorklistMap.empty() && "Worklist empty, but map not?");

    // Do an explicit clear, this shrinks the map if needed.
    WorklistMap.clear();
  }
};
} // end anonymous namespace.


/// InstCombineIRInserter - This is an IRBuilder insertion helper that works
/// just like the normal insertion helper, but also adds any new instructions
/// to the instcombine worklist.
namespace {
class InstCombineIRInserter : public IRBuilderDefaultInserter<true> {
  InstCombineWorklist &Worklist;
public:
  InstCombineIRInserter(InstCombineWorklist &WL) : Worklist(WL) {}

  void InsertHelper(Instruction *I, const Twine &Name,
                    BasicBlock *BB, BasicBlock::iterator InsertPt) const {
    IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
    Worklist.Add(I);
  }
};
} // end anonymous namespace


namespace {
class InstCombiner : public FunctionPass,
                     public InstVisitor<InstCombiner, Instruction*> {
  TargetData *TD;
  bool MustPreserveLCSSA;
public:
  /// Worklist - All of the instructions that need to be simplified.
  InstCombineWorklist Worklist;

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  typedef IRBuilder<true, ConstantFolder, InstCombineIRInserter> BuilderTy;
  BuilderTy *Builder;

  static char ID; // Pass identification, replacement for typeid
  InstCombiner() : FunctionPass(&ID), TD(0), Builder(0) {}

  LLVMContext *Context;
  LLVMContext *getContext() const { return Context; }

  virtual bool runOnFunction(Function &F);

  bool DoOneIteration(Function &F, unsigned ItNum);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addPreservedID(LCSSAID);
    AU.setPreservesCFG();
  }

  TargetData *getTargetData() const { return TD; }

  // Visitation implementation - Implement instruction combining for different
  // instruction types.  The semantics are as follows:
  //
  //     null      - No change was made
  //     I         - Change was made, I is still valid, I may be dead though
  //     otherwise - Change was made, replace I with returned instruction
  //
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool SimplifyDivRemOfSelect(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Instruction *FoldAndOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
  Instruction *FoldAndOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *FoldOrOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
  Instruction *FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
  Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
                                   Value *A, Value *B, Value *C);
  Instruction *visitOr (BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
  Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                              Instruction *LHS,
                                              ConstantInt *RHS);
  Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
                              ConstantInt *DivRHS);
  Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonIntCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
                              Instruction *FI);
  Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocationInst(AllocationInst &AI);
  Instruction *visitFreeInst(FreeInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);

  // visitInstruction - Specify what to return for unhandled instructions...
  Instruction *visitInstruction(Instruction &I) { return 0; }

private:
  Instruction *visitCallSite(CallSite CS);
  bool transformConstExprCastCall(CallSite CS);
  Instruction *transformCallThroughTrampoline(CallSite CS);
  Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
                                 bool DoXform = true);
  bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
  DbgDeclareInst *hasOneUsePlusDeclare(Value *V);

public:
  // InsertNewInstBefore - insert an instruction New before instruction Old
  // in the program.  Add the new instruction to the worklist.
  //
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && New->getParent() == 0 &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(&Old, New);  // Insert inst
    Worklist.Add(New);
    return New;
  }

  // ReplaceInstUsesWith - This method is to be used when an instruction is
  // found to be dead, replaceable with another preexisting expression.  Here
  // we add all uses of I to the worklist, replace all uses of I with the new
  // value, then return I, so that the inst combiner will know that I was
  // modified.
  //
  Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
    Worklist.AddUsersToWorkList(I);   // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    I.replaceAllUsesWith(V);
    return &I;
  }

  // EraseInstFromFunction - When dealing with an instruction that has side
  // effects or produces a void value, we can't rely on DCE to delete the
  // instruction.  Instead, visit methods should return the value returned by
  // EraseInstFromFunction.
  Instruction *EraseInstFromFunction(Instruction &I) {
    DEBUG(errs() << "IC: ERASE " << I << '\n');

    assert(I.use_empty() && "Cannot erase instruction that is used!");
    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    if (I.getNumOperands() < 8) {
      for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
        if (Instruction *Op = dyn_cast<Instruction>(*i))
          Worklist.Add(Op);
    }
    Worklist.Remove(&I);
    I.eraseFromParent();
    return 0;  // Don't do anything with FI
  }

  void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
                         APInt &KnownOne, unsigned Depth = 0) const {
    return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
  }

  bool MaskedValueIsZero(Value *V, const APInt &Mask,
                         unsigned Depth = 0) const {
    return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
  }

  unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
    return llvm::ComputeNumSignBits(Op, TD, Depth);
  }

private:

  /// SimplifyCommutative - This performs a few simplifications for
  /// commutative operators.
  bool SimplifyCommutative(BinaryOperator &I);

  /// SimplifyCompare - This reorders the operands of a CmpInst to get them in
  /// most-complex to least-complex order.
  bool SimplifyCompare(CmpInst &I);

  /// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
  /// based on the demanded bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                 APInt& KnownZero, APInt& KnownOne,
                                 unsigned Depth);
  bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
                            APInt& KnownZero, APInt& KnownOne,
                            unsigned Depth = 0);

  /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
  /// SimplifyDemandedBits knows about.  See if the instruction has any
  /// properties that allow us to simplify its operands.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt& UndefElts, unsigned Depth = 0);

  // FoldOpIntoPhi - Given a binary operator, cast instruction, or select
  // which has a PHI node as operand #0, see if we can fold the instruction
  // into the PHI (which is only possible if all operands to the PHI are
  // constants).
  //
  // If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
  // that would normally be unprofitable because they strongly encourage jump
  // threading.
  Instruction *FoldOpIntoPhi(Instruction &I, bool AllowAggressive = false);

  // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
  // operator and they all are only used by the PHI, PHI together their
  // inputs, and do the operation once, to the result of the PHI.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);

  Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
                            bool isSub, Instruction &I);
  Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                               bool isSigned, bool Inside, Instruction &IB);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
  Instruction *MatchBSwap(BinaryOperator &I);
  bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
  Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
  Instruction *SimplifyMemSet(MemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);

  bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
                                  unsigned CastOpc, int &NumCastsRemoved);
  unsigned GetOrEnforceKnownAlignment(Value *V,
                                      unsigned PrefAlign = 0);
};
} // end anonymous namespace

char InstCombiner::ID = 0;
static RegisterPass<InstCombiner>
X("instcombine", "Combine redundant instructions");

// getComplexity:  Assign a complexity or rank value to LLVM Values...
//   0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
static unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (BinaryOperator::isNeg(V) ||
        BinaryOperator::isFNeg(V) ||
        BinaryOperator::isNot(V))
      return 3;
    return 4;
  }
  if (isa<Argument>(V)) return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
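// For example, in (add i32 %x, 7) the argument %x ranks 3 while the constant
// ranks 1, so the complexity-based canonicalizations below keep %x on the LHS
// and the constant on the RHS (canonicalization #1 from the file header).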

// isOnlyUse - Return true if this instruction will be deleted if we stop using
// it.
static bool isOnlyUse(Value *V) {
  return V->hasOneUse() || isa<Constant>(V);
}

// getPromotedType - Return the specified type promoted as it would be to pass
// through a va_arg area...
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
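// For example, i8 and i16 promote to i32 here; i32 and wider integers, as well
// as non-integer types, are returned unchanged.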

/// getBitCastOperand - If the specified operand is a CastInst, a constant
/// expression bitcast, or a GetElementPtrInst with all zero indices, return
/// the operand value, otherwise return null.
static Value *getBitCastOperand(Value *V) {
  if (Operator *O = dyn_cast<Operator>(V)) {
    if (O->getOpcode() == Instruction::BitCast)
      return O->getOperand(0);
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (GEP->hasAllZeroIndices())
        return GEP->getPointerOperand();
  }
  return 0;
}
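// For example, both 'bitcast i8* %p to i32*' and a getelementptr of %p whose
// indices are all zero hand back %p here.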

/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
static Instruction::CastOps
isEliminableCastPair(
  const CastInst *CI, ///< The first cast instruction
  unsigned opcode,    ///< The opcode of the second cast instruction
  const Type *DstTy,  ///< The target type for the second cast instruction
  TargetData *TD      ///< The target data for pointer size
) {

  const Type *SrcTy = CI->getOperand(0)->getType();   // A from above
  const Type *MidTy = CI->getType();                  // B from above

  // Get the opcodes of the two Cast instructions
  Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
  Instruction::CastOps secondOp = Instruction::CastOps(opcode);

  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy,
                                  TD ? TD->getIntPtrType(CI->getContext()) : 0);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr &&
          (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
      (Res == Instruction::PtrToInt &&
          (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
    Res = 0;

  return Instruction::CastOps(Res);
}
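// As an example of what this wrapper enables: a 'zext i8 to i16' followed by a
// 'zext i16 to i32' can be folded into a single 'zext i8 to i32', and
// CastInst::isEliminableCastPair reports that by returning the opcode of the
// single replacement cast (or 0 when no single cast suffices).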

/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
/// in any code being generated.  It does not require codegen if V is simple
/// enough or if the cast can be folded into other casts.
static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
                              const Type *Ty, TargetData *TD) {
  if (V->getType() == Ty || isa<Constant>(V)) return false;

  // If this is another cast that can be eliminated, it isn't codegen either.
  if (const CastInst *CI = dyn_cast<CastInst>(V))
    if (isEliminableCastPair(CI, opcode, Ty, TD))
      return false;
  return true;
}

// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
//  3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
//
bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
  bool Changed = false;
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  if (!I.isAssociative()) return Changed;
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
    if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
      if (isa<Constant>(I.getOperand(1))) {
        Constant *Folded = ConstantExpr::get(I.getOpcode(),
                                             cast<Constant>(I.getOperand(1)),
                                             cast<Constant>(Op->getOperand(1)));
        I.setOperand(0, Op->getOperand(0));
        I.setOperand(1, Folded);
        return true;
      } else if (BinaryOperator *Op1=dyn_cast<BinaryOperator>(I.getOperand(1)))
        if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
            isOnlyUse(Op) && isOnlyUse(Op1)) {
          Constant *C1 = cast<Constant>(Op->getOperand(1));
          Constant *C2 = cast<Constant>(Op1->getOperand(1));

          // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
          Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
          Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
                                                    Op1->getOperand(0),
                                                    Op->getName(), &I);
          Worklist.Add(New);
          I.setOperand(0, New);
          I.setOperand(1, Folded);
          return true;
        }
    }
  return Changed;
}
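// Transform 2 above is what rewrites, e.g., (add (add %x, 3), 7) into
// (add %x, 10): the two constants are folded with ConstantExpr::get and the
// non-constant operand is reattached directly to the outer operation.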

/// SimplifyCompare - For a CmpInst this function just orders the operands
/// so that they are listed from right (least complex) to left (most complex).
/// This puts constants before unary operators before binary operators.
bool InstCombiner::SimplifyCompare(CmpInst &I) {
  if (getComplexity(I.getOperand(0)) >= getComplexity(I.getOperand(1)))
    return false;
  I.swapOperands();
  // Compare instructions are not associative so there's nothing else we can do.
  return true;
}

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
static inline Value *dyn_castNegVal(Value *V) {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isInteger())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
static inline Value *dyn_castFNegVal(Value *V) {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPoint())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static inline Value *dyn_castNotVal(Value *V) {
  if (BinaryOperator::isNot(V))
    return BinaryOperator::getNotArgument(V);

  // Constants can be considered to be not'ed values...
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantInt::get(C->getType(), ~C->getValue());
  return 0;
}

// dyn_castFoldableMul - If this value is a multiply that can be folded into
// other computations (because it has a constant operand), return the
// non-constant operand of the multiply, and set CST to point to the multiplier.
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
  if (V->hasOneUse() && V->getType()->isInteger())
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (I->getOpcode() == Instruction::Mul)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
          return I->getOperand(0);
      if (I->getOpcode() == Instruction::Shl)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
          // The multiplier is really 1 << CST.
          uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
          uint32_t CSTVal = CST->getLimitedValue(BitWidth);
          CST = ConstantInt::get(V->getType()->getContext(),
                                 APInt(BitWidth, 1).shl(CSTVal));
          return I->getOperand(0);
        }
    }
  return 0;
}

/// AddOne - Add one to a ConstantInt
static Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C,
                              ConstantInt::get(C->getType(), 1));
}

/// SubOne - Subtract one from a ConstantInt
static Constant *SubOne(ConstantInt *C) {
  return ConstantExpr::getSub(C,
                              ConstantInt::get(C->getType(), 1));
}

/// MultiplyOverflows - True if the multiply cannot be expressed in an int of
/// this size.
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
  uint32_t W = C1->getBitWidth();
  APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
  if (sign) {
    LHSExt.sext(W * 2);
    RHSExt.sext(W * 2);
  } else {
    LHSExt.zext(W * 2);
    RHSExt.zext(W * 2);
  }

  APInt MulExt = LHSExt * RHSExt;

  if (sign) {
    APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
    APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
    return MulExt.slt(Min) || MulExt.sgt(Max);
  }
  return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
}
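// For example, with W == 8: signed 100 * 2 = 200 falls outside [-128, 127], so
// the signed check returns true, while unsigned 100 * 2 = 200 still fits in 8
// bits and the unsigned check returns false.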

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   APInt Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // If the operand is not a constant integer, nothing to do.
  ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
  if (!OpC) return false;

  // If there are no bits set that aren't demanded, nothing to do.
  Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
  if ((~Demanded & OpC->getValue()) == 0)
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  Demanded &= OpC->getValue();
  I->setOperand(OpNo, ConstantInt::get(OpC->getType(), Demanded));
  return true;
}
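// For example, if I is (and i32 %x, 0x1FF) and only the low eight bits of the
// result are demanded, bit 8 of the constant is not needed and the operand is
// rewritten to 0xFF.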

// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
// set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
                                                   const APInt& KnownOne,
                                                   APInt& Min, APInt& Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = KnownOne;
  Max = KnownOne|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.set(Min.getBitWidth()-1);
    Max.clear(Max.getBitWidth()-1);
  }
}
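// Worked example with 4-bit values: KnownZero = 0b1000 and KnownOne = 0b0001
// leave bits 1 and 2 unknown, so Min = 0b0001 (1) and Max = 0b0111 (7); the
// sign bit is known zero, so the special case above does not fire.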

// ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
// a set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                     const APInt &KnownOne,
                                                     APInt &Min, APInt &Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when the unknown bits are all zeros.
  Min = KnownOne;
  // The maximum value is when the unknown bits are all ones.
  Max = KnownOne|UnknownBits;
}

/// SimplifyDemandedInstructionBits - Inst is an integer instruction that
/// SimplifyDemandedBits knows about.  See if the instruction has any
/// properties that allow us to simplify its operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
                                     KnownZero, KnownOne, 0);
  if (V == 0) return false;
  if (V == &Inst) return true;
  ReplaceInstUsesWith(Inst, V);
  return true;
}

/// SimplifyDemandedBits - This form of SimplifyDemandedBits simplifies the
/// specified instruction operand if possible, updating it in place.  It
/// returns true if it made any change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
                                        APInt &KnownZero, APInt &KnownOne,
                                        unsigned Depth) {
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
                                          KnownZero, KnownOne, Depth);
  if (NewVal == 0) return false;
  U = NewVal;
  return true;
}

/// SimplifyDemandedUseBits - This function attempts to replace V with a
/// simpler value based on the demanded bits.  When this function is called, it
/// is known that only the bits set in DemandedMask of the result of V are ever
/// used downstream.  Consequently, depending on the mask and V, it may be
/// possible to replace V with a constant or one of its operands.  In such
/// cases, this function does the replacement and returns true.  In all other
/// cases, it returns false after analyzing the expression and setting KnownOne
/// to the bits known to be one in the expression.  KnownZero contains all the
/// bits that are known to be zero in the expression.  These are provided to
/// potentially allow the caller (which might recursively be
/// SimplifyDemandedBits itself) to simplify the expression.  KnownOne and
/// KnownZero always follow the invariant that KnownOne & KnownZero == 0.  That
/// is, a bit can't be both 1 and 0.  Note that the bits in KnownOne and
/// KnownZero may only be accurate for those bits set in DemandedMask.  Note
/// also that the bitwidth of V, DemandedMask, KnownZero and KnownOne must all
/// be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification.  This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded.  This
/// returns some other non-null value if it found out that V is equal to
/// another value in the context where the specified bits are demanded, but not
/// for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             APInt &KnownZero, APInt &KnownOne,
                                             unsigned Depth) {
  assert(V != 0 && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  const Type *VTy = V->getType();
  assert((TD || !isa<PointerType>(VTy)) &&
         "SimplifyDemandedBits needs to know bit widths!");
  assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
         (!VTy->isIntOrIntVector() ||
          VTy->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "Value *V, DemandedMask, KnownZero and KnownOne "
         "must have same BitWidth");
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return 0;
  }
  if (isa<ConstantPointerNull>(V)) {
    // We know all of the bits for a constant!
    KnownOne.clear();
    KnownZero = DemandedMask;
    return 0;
  }

  KnownZero.clear();
  KnownOne.clear();
  if (DemandedMask == 0) {   // Not demanding any bits from V.
    if (isa<UndefValue>(V))
      return 0;
    return UndefValue::get(VTy);
  }

  if (Depth == 6)        // Limit search depth.
    return 0;

  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    return 0;        // Only analyze instructions.
  }

  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse()) {
    // Despite the fact that we can't simplify this instruction in all User's
    // context, we can at least compute the knownzero/knownone bits, and we can
    // do simplifications that apply to *just* the one user if we know that
    // this instruction has a simpler value in that context.
    if (I->getOpcode() == Instruction::And) {
      // If either the LHS or the RHS are Zero, the result is zero.
      ComputeMaskedBits(I->getOperand(1), DemandedMask,
                        RHSKnownZero, RHSKnownOne, Depth+1);
      ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
                        LHSKnownZero, LHSKnownOne, Depth+1);

      // If all of the demanded bits are known 1 on one side, return the other.
      // These bits cannot contribute to the result of the 'and' in this
      // context.
      if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
          (DemandedMask & ~LHSKnownZero))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
          (DemandedMask & ~RHSKnownZero))
        return I->getOperand(1);

      // If all of the demanded bits in the inputs are known zeros, return zero.
      if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
        return Constant::getNullValue(VTy);

    } else if (I->getOpcode() == Instruction::Or) {
      // We can simplify (X|Y) -> X or Y in the user's context if we know that
      // only bits from X or Y are demanded.

      // If either the LHS or the RHS are One, the result is One.
      ComputeMaskedBits(I->getOperand(1), DemandedMask,
                        RHSKnownZero, RHSKnownOne, Depth+1);
      ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
                        LHSKnownZero, LHSKnownOne, Depth+1);

      // If all of the demanded bits are known zero on one side, return the
      // other.  These bits cannot contribute to the result of the 'or' in this
      // context.
      if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
          (DemandedMask & ~LHSKnownOne))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
          (DemandedMask & ~RHSKnownOne))
        return I->getOperand(1);

      // If all of the potentially set bits on one side are known to be set on
      // the other side, just use the 'other' side.
      if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
          (DemandedMask & (~RHSKnownZero)))
        return I->getOperand(0);
      if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
          (DemandedMask & (~LHSKnownZero)))
        return I->getOperand(1);
    }

    // Compute the KnownZero/KnownOne bits to simplify things downstream.
    ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
    return 0;
  }

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands.  This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask = APInt::getAllOnesValue(BitWidth);

  switch (I->getOpcode()) {
  default:
    ComputeMaskedBits(I, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  case Instruction::And:
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
        (DemandedMask & ~LHSKnownZero))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
        (DemandedMask & ~RHSKnownZero))
      return I->getOperand(1);

    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
      return Constant::getNullValue(VTy);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
      return I;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    RHSKnownOne &= LHSKnownOne;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    RHSKnownZero |= LHSKnownZero;
    break;
  case Instruction::Or:
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
        (DemandedMask & ~LHSKnownOne))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
        (DemandedMask & ~RHSKnownOne))
      return I->getOperand(1);

    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
        (DemandedMask & (~RHSKnownZero)))
      return I->getOperand(0);
    if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
        (DemandedMask & (~LHSKnownZero)))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    RHSKnownZero &= LHSKnownZero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    RHSKnownOne |= LHSKnownOne;
    break;
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & RHSKnownZero) == DemandedMask)
      return I->getOperand(0);
    if ((DemandedMask & LHSKnownZero) == DemandedMask)
      return I->getOperand(1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (RHSKnownZero & LHSKnownZero) |
                         (RHSKnownOne & LHSKnownOne);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    APInt KnownOneOut = (RHSKnownZero & LHSKnownOne) |
                        (RHSKnownOne & LHSKnownZero);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
      Instruction *Or =
        BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                 I->getName());
      return InsertNewInstBefore(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
      // all known
      if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
        Constant *AndC = Constant::getIntegerValue(VTy,
                                                   ~RHSKnownOne & DemandedMask);
        Instruction *And =
          BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
        return InsertNewInstBefore(And, *I);
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    RHSKnownZero = KnownZeroOut;
    RHSKnownOne  = KnownOneOut;
    break;
  }
  case Instruction::Select:
    if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
                             LHSKnownZero, LHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    RHSKnownOne &= LHSKnownOne;
    RHSKnownZero &= LHSKnownZero;
    break;
  case Instruction::Trunc: {
    unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
    DemandedMask.zext(truncBf);
    RHSKnownZero.zext(truncBf);
    RHSKnownOne.zext(truncBf);
    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    DemandedMask.trunc(BitWidth);
    RHSKnownZero.trunc(BitWidth);
    RHSKnownOne.trunc(BitWidth);
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVector())
      return false;  // vector->int or fp->int?

    if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (const VectorType *SrcVTy =
            dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return false;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return false;
    } else if (isa<VectorType>(I->getOperand(0)->getType()))
      // Don't touch a vector-to-scalar bitcast.
      return false;

    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    break;
  case Instruction::ZExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    DemandedMask.trunc(SrcBitWidth);
    RHSKnownZero.trunc(SrcBitWidth);
    RHSKnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    DemandedMask.zext(BitWidth);
    RHSKnownZero.zext(BitWidth);
    RHSKnownOne.zext(BitWidth);
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    // The top bits are known to be zero.
    RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask &
                              APInt::getLowBitsSet(BitWidth, SrcBitWidth);

    APInt NewBits(APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth));
    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if ((NewBits & DemandedMask) != 0)
      InputDemandedBits.set(SrcBitWidth-1);

    InputDemandedBits.trunc(SrcBitWidth);
    RHSKnownZero.trunc(SrcBitWidth);
    RHSKnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
                             RHSKnownZero, RHSKnownOne, Depth+1))
      return I;
    InputDemandedBits.zext(BitWidth);
    RHSKnownZero.zext(BitWidth);
    RHSKnownOne.zext(BitWidth);
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, or if the NewBits are not demanded
    // convert this into a zero extension.
    if (RHSKnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
      // Convert to ZExt cast
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstBefore(NewCast, *I);
    } else if (RHSKnownOne[SrcBitWidth-1]) {    // Input sign bit known set
      RHSKnownOne |= NewBits;
    }
    break;
  }
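  // For example, for 'sext i8 %x to i32' where only the low eight bits of the
  // result are demanded, none of NewBits (bits 8-31) is needed, so the sext is
  // rewritten as the cheaper 'zext i8 %x to i32'.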
  case Instruction::Add: {
    // Figure out what the input bits are.  If the top bits of the and result
    // are not demanded, then the add doesn't demand them from its input
    // either.
    unsigned NLZ = DemandedMask.countLeadingZeros();

    // If there is a constant on the RHS, there are a variety of xformations
    // that we can do.
    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // If null, this should be simplified elsewhere.  Some of the xforms here
      // won't work if the RHS is zero.
      if (RHS->isZero())
        break;

      // If the top bit of the output is demanded, demand everything from the
      // input.  Otherwise, we demand all the input bits except NLZ top bits.
      APInt InDemandedBits(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));

      // Find information about known zero/one bits in the input.
      if (SimplifyDemandedBits(I->getOperandUse(0), InDemandedBits,
                               LHSKnownZero, LHSKnownOne, Depth+1))
        return I;

      // If the RHS of the add has bits set that can't affect the input, reduce
      // the constant.
      if (ShrinkDemandedConstant(I, 1, InDemandedBits))
        return I;

      // Avoid excess work.
      if (LHSKnownZero == 0 && LHSKnownOne == 0)
        break;

      // Turn it into OR if input bits are zero.
      if ((LHSKnownZero & RHS->getValue()) == RHS->getValue()) {
        Instruction *Or =
          BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                   I->getName());
        return InsertNewInstBefore(Or, *I);
      }

      // We can say something about the output known-zero and known-one bits,
      // depending on potential carries from the input constant and the
      // unknowns.  For example if the LHS is known to have at most the 0x0F0F0
      // bits set and the RHS constant is 0x01001, then we know we have a known
      // one mask of 0x00001 and a known zero mask of 0xE0F0E.

      // To compute this, we first compute the potential carry bits.  These are
      // the bits which may be modified.  I'm not aware of a better way to do
      // this scan.
      const APInt &RHSVal = RHS->getValue();
      APInt CarryBits((~LHSKnownZero + RHSVal) ^ (~LHSKnownZero ^ RHSVal));

      // Now that we know which bits have carries, compute the known-1/0 sets.

      // Bits are known one if they are known zero in one operand and one in the
      // other, and there is no input carry.
      RHSKnownOne = ((LHSKnownZero & RHSVal) |
                     (LHSKnownOne & ~RHSVal)) & ~CarryBits;

      // Bits are known zero if they are known zero in both operands and there
      // is no input carry.
      RHSKnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
    } else {
      // If the high-bits of this ADD are not demanded, then it does not demand
      // the high bits of its LHS or RHS.
      if (DemandedMask[BitWidth-1] == 0) {
        // Right fill the mask of bits for this ADD to demand the most
        // significant bit and all those below it.
        APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
        if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
                                 LHSKnownZero, LHSKnownOne, Depth+1) ||
            SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
                                 LHSKnownZero, LHSKnownOne, Depth+1))
          return I;
      }
    }
    break;
  }
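  // A note on the CarryBits formula above: for any x and y, (x + y) ^ (x ^ y)
  // marks exactly the bit positions that received a carry-in during the add.
  // Using x = ~LHSKnownZero (every possibly-set LHS bit set) and y = RHSVal
  // gives a conservative estimate of where carries can land, which is why the
  // known-one and known-zero results are masked with ~CarryBits.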
  case Instruction::Sub:
    // If the high-bits of this SUB are not demanded, then it does not demand
    // the high bits of its LHS or RHS.
    if (DemandedMask[BitWidth-1] == 0) {
      // Right fill the mask of bits for this SUB to demand the most
      // significant bit and all those below it.
      uint32_t NLZ = DemandedMask.countLeadingZeros();
      APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
                               LHSKnownZero, LHSKnownOne, Depth+1) ||
          SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
                               LHSKnownZero, LHSKnownOne, Depth+1))
        return I;
    }
    // Otherwise just hand the sub off to ComputeMaskedBits to fill in
    // the known zeros and ones.
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  case Instruction::Shl:
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      RHSKnownZero <<= ShiftAmt;
      RHSKnownOne  <<= ShiftAmt;
      // low bits known zero.
      if (ShiftAmt)
        RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
    }
    break;
  case Instruction::LShr:
    // For a logical shift right
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
      RHSKnownOne  = APIntOps::lshr(RHSKnownOne, ShiftAmt);

      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      RHSKnownZero |= HighBits;  // high bits known zero.
    }
    break;
  case Instruction::AShr:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable.  The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedMask == 1) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
                        I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstBefore(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignBit())
      return I->getOperand(0);

    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the "high bits" are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.set(BitWidth-1);
      if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
                               RHSKnownZero, RHSKnownOne, Depth+1))
        return I;
      assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
      RHSKnownOne  = APIntOps::lshr(RHSKnownOne, ShiftAmt);

      // Handle the sign bits.
      APInt SignBit(APInt::getSignBit(BitWidth));
      // Adjust to where it is now in the mask.
      SignBit = APIntOps::lshr(SignBit, ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (BitWidth <= ShiftAmt || RHSKnownZero[BitWidth-ShiftAmt-1] ||
          (HighBits & ~DemandedMask) == HighBits) {
        // Perform the logical shift right.
        Instruction *NewVal = BinaryOperator::CreateLShr(
                          I->getOperand(0), SA, I->getName());
        return InsertNewInstBefore(NewVal, *I);
      } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
        RHSKnownOne |= HighBits;
      }
    }
    break;
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA))    // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
                                 LHSKnownZero, LHSKnownOne, Depth+1))
          return I;

        if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
          LHSKnownZero |= ~LowBits;

        KnownZero |= LHSKnownZero & DemandedMask;

        assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      }
    }
    break;
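  // For example, for 'srem i32 %x, 8' where only the low three bits of the
  // result are demanded, the remainder and %x agree on those bits (they differ
  // by a multiple of 8), so %x itself is returned above.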
  case Instruction::URem: {
    APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
                             KnownZero2, KnownOne2, Depth+1) ||
        SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
                             KnownZero2, KnownOne2, Depth+1))
      return I;

    unsigned Leaders = KnownZero2.countLeadingOnes();
    Leaders = std::max(Leaders,
                       KnownZero2.countLeadingOnes());
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte.  If we have 11 trailing zeros, then
        // we need all the bits down to bit 8.  Likewise, round NLZ.  If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte into
          // the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
                    ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(I->getOperand(1),
                    ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstBefore(NewVal, *I);
        }
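
        // Worked example: for an i32 bswap where DemandedMask is 0x0000FF00
        // (NLZ = 16, NTZ = 8), the demanded result byte comes from bits 16-23
        // of the input, so InputBit = 16, ResultBit = 8 and the bswap is
        // replaced by a single 'lshr' of 8.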

        // TODO: Could compute known zero/one bits based on the input.
        break;
      }
      }
    }
    ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask)
    return Constant::getIntegerValue(VTy, RHSKnownOne);
  return 0;
}

/// SimplifyDemandedVectorElts - The specified value produces a vector with
/// any number of elements. DemandedElts contains the set of elements that are
/// actually used by the caller.  This method analyzes which elements of the
/// operand are undef and returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified, then the resultant value is
/// returned.  This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return 0;
  } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;
  if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
    const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    std::vector<Constant*> Elts;
    for (unsigned i = 0; i != VWidth; ++i)
      if (!DemandedElts[i]) {   // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.set(i);
      } else if (isa<UndefValue>(CP->getOperand(i))) {   // Already undef.
        Elts.push_back(Undef);
        UndefElts.set(i);
      } else {                               // Otherwise, defined.
        Elts.push_back(CP->getOperand(i));
      }

    // If we changed the constant, return it.
    Constant *NewCP = ConstantVector::get(Elts);
    return NewCP != CP ? NewCP : 0;
1479 } else if (isa<ConstantAggregateZero>(V)) {
1480 // Simplify the CAZ to a ConstantVector where the non-demanded elements are
1483 // Check if this is identity. If so, return 0 since we are not simplifying
1485 if (DemandedElts == ((1ULL << VWidth) -1))
1488 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1489 Constant *Zero = Constant::getNullValue(EltTy);
1490 Constant *Undef = UndefValue::get(EltTy);
1491 std::vector<Constant*> Elts;
1492 for (unsigned i = 0; i != VWidth; ++i) {
1493 Constant *Elt = DemandedElts[i] ? Zero : Undef;
1494 Elts.push_back(Elt);
1496 UndefElts = DemandedElts ^ EltMask;
1497 return ConstantVector::get(Elts);
1500 // Limit search depth.
1504 // If multiple users are using the root value, proceed with
1505 // simplification conservatively assuming that all elements
1506 // are needed.
1507 if (!V->hasOneUse()) {
1508 // Quit if we find multiple users of a non-root value though.
1509 // They'll be handled when it's their turn to be visited by
1510 // the main instcombine process.
1512 // TODO: Just compute the UndefElts information recursively.
1515 // Conservatively assume that all elements are needed.
1516 DemandedElts = EltMask;
1519 Instruction *I = dyn_cast<Instruction>(V);
1520 if (!I) return 0; // Only analyze instructions.
1522 bool MadeChange = false;
1523 APInt UndefElts2(VWidth, 0);
1525 switch (I->getOpcode()) {
1528 case Instruction::InsertElement: {
1529 // If this is a variable index, we don't know which element it overwrites, so
1530 // demand exactly the same input as we produce.
1531 ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1533 // Note that we can't propagate undef elt info, because we don't know
1534 // which elt is getting updated.
1535 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1536 UndefElts2, Depth+1);
1537 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1541 // If this is inserting an element that isn't demanded, remove this
1542 // insertelement.
1543 unsigned IdxNo = Idx->getZExtValue();
1544 if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1546 return I->getOperand(0);
1549 // Otherwise, the element inserted overwrites whatever was there, so the
1550 // input demanded set is simpler than the output set.
1551 APInt DemandedElts2 = DemandedElts;
1552 DemandedElts2.clear(IdxNo);
1553 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
1554 UndefElts, Depth+1);
1555 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1557 // The inserted element is defined.
1558 UndefElts.clear(IdxNo);
1561 case Instruction::ShuffleVector: {
1562 ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
1563 uint64_t LHSVWidth =
1564 cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
1565 APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
1566 for (unsigned i = 0; i < VWidth; i++) {
1567 if (DemandedElts[i]) {
1568 unsigned MaskVal = Shuffle->getMaskValue(i);
1569 if (MaskVal != -1u) {
1570 assert(MaskVal < LHSVWidth * 2 &&
1571 "shufflevector mask index out of range!");
1572 if (MaskVal < LHSVWidth)
1573 LeftDemanded.set(MaskVal);
1575 RightDemanded.set(MaskVal - LHSVWidth);
1580 APInt UndefElts4(LHSVWidth, 0);
1581 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
1582 UndefElts4, Depth+1);
1583 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1585 APInt UndefElts3(LHSVWidth, 0);
1586 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
1587 UndefElts3, Depth+1);
1588 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1590 bool NewUndefElts = false;
1591 for (unsigned i = 0; i < VWidth; i++) {
1592 unsigned MaskVal = Shuffle->getMaskValue(i);
1593 if (MaskVal == -1u) {
1595 } else if (MaskVal < LHSVWidth) {
1596 if (UndefElts4[MaskVal]) {
1597 NewUndefElts = true;
1601 if (UndefElts3[MaskVal - LHSVWidth]) {
1602 NewUndefElts = true;
1609 // Add additional discovered undefs.
1610 std::vector<Constant*> Elts;
1611 for (unsigned i = 0; i < VWidth; ++i) {
1613 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
1615 Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
1616 Shuffle->getMaskValue(i)));
1618 I->setOperand(2, ConstantVector::get(Elts));
1623 case Instruction::BitCast: {
1624 // Vector->vector casts only.
1625 const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1627 unsigned InVWidth = VTy->getNumElements();
1628 APInt InputDemandedElts(InVWidth, 0);
1631 if (VWidth == InVWidth) {
1632 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1633 // elements as are demanded of us.
1635 InputDemandedElts = DemandedElts;
1636 } else if (VWidth > InVWidth) {
1640 // If there are more elements in the result than there are in the source,
1641 // then an input element is live if any of the corresponding output
1642 // elements are live.
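// Illustrative example: for a bitcast from <2 x i64> to <4 x i32>, Ratio is
// 2 and source element i is demanded whenever result element 2*i or 2*i+1 is.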
1643 Ratio = VWidth/InVWidth;
1644 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1645 if (DemandedElts[OutIdx])
1646 InputDemandedElts.set(OutIdx/Ratio);
1652 // If there are more elements in the source than there are in the result,
1653 // then an input element is live if the corresponding output element is
1655 Ratio = InVWidth/VWidth;
1656 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1657 if (DemandedElts[InIdx/Ratio])
1658 InputDemandedElts.set(InIdx);
1661 // Simplify the source vector based on the input elements we computed as demanded above.
1662 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
1663 UndefElts2, Depth+1);
1665 I->setOperand(0, TmpV);
1669 UndefElts = UndefElts2;
1670 if (VWidth > InVWidth) {
1671 llvm_unreachable("Unimp");
1672 // If there are more elements in the result than there are in the source,
1673 // then an output element is undef if the corresponding input element is
1675 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1676 if (UndefElts2[OutIdx/Ratio])
1677 UndefElts.set(OutIdx);
1678 } else if (VWidth < InVWidth) {
1679 llvm_unreachable("Unimp");
1680 // If there are more elements in the source than there are in the result,
1681 // then a result element is undef if all of the corresponding input
1682 // elements are undef.
1683 UndefElts = ~0ULL >> (64-VWidth); // Start out all undef.
1684 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1685 if (!UndefElts2[InIdx]) // Not undef?
1686 UndefElts.clear(InIdx/Ratio); // Clear undef bit.
1690 case Instruction::And:
1691 case Instruction::Or:
1692 case Instruction::Xor:
1693 case Instruction::Add:
1694 case Instruction::Sub:
1695 case Instruction::Mul:
1696 // Div/rem are not handled here: they demand all of their input elements, because we don't want to introduce a divide by zero.
1697 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1698 UndefElts, Depth+1);
1699 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1700 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
1701 UndefElts2, Depth+1);
1702 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1704 // Output elements are undefined if both are undefined. Consider things
1705 // like undef&0. The result is known zero, not undef.
1706 UndefElts &= UndefElts2;
1709 case Instruction::Call: {
1710 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1712 switch (II->getIntrinsicID()) {
1715 // Binary vector operations that work column-wise. A dest element is a
1716 // function of the corresponding input elements from the two inputs.
1717 case Intrinsic::x86_sse_sub_ss:
1718 case Intrinsic::x86_sse_mul_ss:
1719 case Intrinsic::x86_sse_min_ss:
1720 case Intrinsic::x86_sse_max_ss:
1721 case Intrinsic::x86_sse2_sub_sd:
1722 case Intrinsic::x86_sse2_mul_sd:
1723 case Intrinsic::x86_sse2_min_sd:
1724 case Intrinsic::x86_sse2_max_sd:
1725 TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
1726 UndefElts, Depth+1);
1727 if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
1728 TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
1729 UndefElts2, Depth+1);
1730 if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
1732 // If only the low elt is demanded and this is a scalarizable intrinsic,
1733 // scalarize it now.
1734 if (DemandedElts == 1) {
1735 switch (II->getIntrinsicID()) {
1737 case Intrinsic::x86_sse_sub_ss:
1738 case Intrinsic::x86_sse_mul_ss:
1739 case Intrinsic::x86_sse2_sub_sd:
1740 case Intrinsic::x86_sse2_mul_sd:
1741 // TODO: Lower MIN/MAX/ABS/etc
1742 Value *LHS = II->getOperand(1);
1743 Value *RHS = II->getOperand(2);
1744 // Extract the element as scalars.
1745 LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
1746 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
1747 RHS = InsertNewInstBefore(ExtractElementInst::Create(RHS,
1748 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
1750 switch (II->getIntrinsicID()) {
1751 default: llvm_unreachable("Case stmts out of sync!");
1752 case Intrinsic::x86_sse_sub_ss:
1753 case Intrinsic::x86_sse2_sub_sd:
1754 TmpV = InsertNewInstBefore(BinaryOperator::CreateFSub(LHS, RHS,
1755 II->getName()), *II);
1757 case Intrinsic::x86_sse_mul_ss:
1758 case Intrinsic::x86_sse2_mul_sd:
1759 TmpV = InsertNewInstBefore(BinaryOperator::CreateFMul(LHS, RHS,
1760 II->getName()), *II);
1765 InsertElementInst::Create(
1766 UndefValue::get(II->getType()), TmpV,
1767 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), II->getName());
1768 InsertNewInstBefore(New, *II);
1773 // Output elements are undefined if both are undefined. Consider things
1774 // like undef&0. The result is known zero, not undef.
1775 UndefElts &= UndefElts2;
1781 return MadeChange ? I : 0;
1785 /// AssociativeOpt - Perform an optimization on an associative operator. This
1786 /// function is designed to check a chain of associative operators for a
1787 /// potential to apply a certain optimization. Since the optimization may be
1788 /// applicable if the expression was reassociated, this checks the chain, then
1789 /// reassociates the expression as necessary to expose the optimization
1790 /// opportunity. This makes use of a special Functor, which must define
1791 /// 'shouldApply' and 'apply' methods.
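///
/// Illustrative example: with the AddRHS functor below (X + X --> X << 1),
/// an expression of the form ((A + X) + X) is reassociated to (A + (X + X))
/// so that the inner X + X can be rewritten as X << 1.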
1793 template<typename Functor>
1794 static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
1795 unsigned Opcode = Root.getOpcode();
1796 Value *LHS = Root.getOperand(0);
1798 // Quick check, see if the immediate LHS matches...
1799 if (F.shouldApply(LHS))
1800 return F.apply(Root);
1802 // Otherwise, if the LHS is not of the same opcode as the root, return.
1803 Instruction *LHSI = dyn_cast<Instruction>(LHS);
1804 while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
1805 // Should we apply this transform to the RHS?
1806 bool ShouldApply = F.shouldApply(LHSI->getOperand(1));
1808 // If not to the RHS, check to see if we should apply to the LHS...
1809 if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
1810 cast<BinaryOperator>(LHSI)->swapOperands(); // Make the LHS the RHS
1814 // If the functor wants to apply the optimization to the RHS of LHSI,
1815 // reassociate the expression from ((? op A) op B) to (? op (A op B))
1817 // Now all of the instructions are in the current basic block, go ahead
1818 // and perform the reassociation.
1819 Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));
1821 // First move the selected RHS to the LHS of the root...
1822 Root.setOperand(0, LHSI->getOperand(1));
1824 // Make what used to be the LHS of the root be the user of the root...
1825 Value *ExtraOperand = TmpLHSI->getOperand(1);
1826 if (&Root == TmpLHSI) {
1827 Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType()));
1830 Root.replaceAllUsesWith(TmpLHSI); // Users now use TmpLHSI
1831 TmpLHSI->setOperand(1, &Root); // TmpLHSI now uses the root
1832 BasicBlock::iterator ARI = &Root; ++ARI;
1833 TmpLHSI->moveBefore(ARI); // Move TmpLHSI to after Root
1836 // Now propagate the ExtraOperand down the chain of instructions until we
1837 // get to LHSI.
1838 while (TmpLHSI != LHSI) {
1839 Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
1840 // Move the instruction to immediately before the chain we are
1841 // constructing to avoid breaking dominance properties.
1842 NextLHSI->moveBefore(ARI);
1845 Value *NextOp = NextLHSI->getOperand(1);
1846 NextLHSI->setOperand(1, ExtraOperand);
1848 ExtraOperand = NextOp;
1851 // Now that the instructions are reassociated, have the functor perform
1852 // the transformation...
1853 return F.apply(Root);
1856 LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
1863 // AddRHS - Implements: X + X --> X << 1
1866 explicit AddRHS(Value *rhs) : RHS(rhs) {}
1867 bool shouldApply(Value *LHS) const { return LHS == RHS; }
1868 Instruction *apply(BinaryOperator &Add) const {
1869 return BinaryOperator::CreateShl(Add.getOperand(0),
1870 ConstantInt::get(Add.getType(), 1));
1874 // AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2)
1875 // iff C1&C2 == 0
1876 struct AddMaskingAnd {
1878 explicit AddMaskingAnd(Constant *c) : C2(c) {}
1879 bool shouldApply(Value *LHS) const {
1881 return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
1882 ConstantExpr::getAnd(C1, C2)->isNullValue();
1884 Instruction *apply(BinaryOperator &Add) const {
1885 return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1));
1891 static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
1893 if (CastInst *CI = dyn_cast<CastInst>(&I))
1894 return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
1896 // Figure out if the constant is the left or the right argument.
1897 bool ConstIsRHS = isa<Constant>(I.getOperand(1));
1898 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
1900 if (Constant *SOC = dyn_cast<Constant>(SO)) {
1902 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
1903 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
1906 Value *Op0 = SO, *Op1 = ConstOperand;
1908 std::swap(Op0, Op1);
1910 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
1911 return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
1912 SO->getName()+".op");
1913 if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
1914 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
1915 SO->getName()+".cmp");
1916 if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
1917 return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
1918 SO->getName()+".cmp");
1919 llvm_unreachable("Unknown binary instruction type!");
1922 // FoldOpIntoSelect - Given an instruction with a select as one operand and a
1923 // constant as the other operand, try to fold the binary operator into the
1924 // select arguments. This also works for Cast instructions, which obviously do
1925 // not have a second operand.
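// Illustrative example: add (select %c, i32 4, i32 8), i32 2
//                   --> select %c, i32 6, i32 10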
1926 static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1928 // Don't modify shared select instructions
1929 if (!SI->hasOneUse()) return 0;
1930 Value *TV = SI->getOperand(1);
1931 Value *FV = SI->getOperand(2);
1933 if (isa<Constant>(TV) || isa<Constant>(FV)) {
1934 // Bool selects with constant operands can be folded to logical ops.
1935 if (SI->getType() == Type::getInt1Ty(*IC->getContext())) return 0;
1937 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
1938 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);
1940 return SelectInst::Create(SI->getCondition(), SelectTrueVal,
1947 /// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
1948 /// has a PHI node as operand #0, see if we can fold the instruction into the
1949 /// PHI (which is only possible if all operands to the PHI are constants).
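///
/// Illustrative example:
///   %p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ]
///   %r = add i32 %p, 10
/// becomes
///   %r = phi i32 [ 11, %bb1 ], [ 12, %bb2 ]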
1951 /// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
1952 /// that would normally be unprofitable because they strongly encourage jump
1953 /// threading.
1954 Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
1955 bool AllowAggressive) {
1956 AllowAggressive = false;
1957 PHINode *PN = cast<PHINode>(I.getOperand(0));
1958 unsigned NumPHIValues = PN->getNumIncomingValues();
1959 if (NumPHIValues == 0 ||
1960 // We normally only transform phis with a single use, unless we're trying
1961 // hard to make jump threading happen.
1962 (!PN->hasOneUse() && !AllowAggressive))
1966 // Check to see if all of the operands of the PHI are simple constants
1967 // (constantint/constantfp/undef). If there is one non-constant value,
1968 // remember the BB it is in. If there is more than one or if *it* is a PHI,
1969 // bail out. We don't do arbitrary constant expressions here because moving
1970 // their computation can be expensive without a cost model.
1971 BasicBlock *NonConstBB = 0;
1972 for (unsigned i = 0; i != NumPHIValues; ++i)
1973 if (!isa<Constant>(PN->getIncomingValue(i)) ||
1974 isa<ConstantExpr>(PN->getIncomingValue(i))) {
1975 if (NonConstBB) return 0; // More than one non-const value.
1976 if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
1977 NonConstBB = PN->getIncomingBlock(i);
1979 // If the incoming non-constant value is in I's block, we have an infinite
1980 // loop.
1981 if (NonConstBB == I.getParent())
1985 // If there is exactly one non-constant value, we can insert a copy of the
1986 // operation in that block. However, if this is a critical edge, we would be
1987 // inserting the computation on some other paths (e.g. inside a loop). Only
1988 // do this if the pred block is unconditionally branching into the phi block.
1989 if (NonConstBB != 0 && !AllowAggressive) {
1990 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
1991 if (!BI || !BI->isUnconditional()) return 0;
1994 // Okay, we can do the transformation: create the new PHI node.
1995 PHINode *NewPN = PHINode::Create(I.getType(), "");
1996 NewPN->reserveOperandSpace(PN->getNumOperands()/2);
1997 InsertNewInstBefore(NewPN, *PN);
1998 NewPN->takeName(PN);
2000 // Next, add all of the operands to the PHI.
2001 if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
2002 // We only currently try to fold the condition of a select when it is a phi,
2003 // not the true/false values.
2004 Value *TrueV = SI->getTrueValue();
2005 Value *FalseV = SI->getFalseValue();
2006 BasicBlock *PhiTransBB = PN->getParent();
2007 for (unsigned i = 0; i != NumPHIValues; ++i) {
2008 BasicBlock *ThisBB = PN->getIncomingBlock(i);
2009 Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
2010 Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
2012 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2013 InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
2015 assert(PN->getIncomingBlock(i) == NonConstBB);
2016 InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
2018 "phitmp", NonConstBB->getTerminator());
2019 Worklist.Add(cast<Instruction>(InV));
2021 NewPN->addIncoming(InV, ThisBB);
2023 } else if (I.getNumOperands() == 2) {
2024 Constant *C = cast<Constant>(I.getOperand(1));
2025 for (unsigned i = 0; i != NumPHIValues; ++i) {
2027 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2028 if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2029 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
2031 InV = ConstantExpr::get(I.getOpcode(), InC, C);
2033 assert(PN->getIncomingBlock(i) == NonConstBB);
2034 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
2035 InV = BinaryOperator::Create(BO->getOpcode(),
2036 PN->getIncomingValue(i), C, "phitmp",
2037 NonConstBB->getTerminator());
2038 else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2039 InV = CmpInst::Create(CI->getOpcode(),
2040 CI->getPredicate(),
2041 PN->getIncomingValue(i), C, "phitmp",
2042 NonConstBB->getTerminator());
2044 llvm_unreachable("Unknown binop!");
2046 Worklist.Add(cast<Instruction>(InV));
2048 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2051 CastInst *CI = cast<CastInst>(&I);
2052 const Type *RetTy = CI->getType();
2053 for (unsigned i = 0; i != NumPHIValues; ++i) {
2055 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2056 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
2058 assert(PN->getIncomingBlock(i) == NonConstBB);
2059 InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
2060 I.getType(), "phitmp",
2061 NonConstBB->getTerminator());
2062 Worklist.Add(cast<Instruction>(InV));
2064 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2067 return ReplaceInstUsesWith(I, NewPN);
2071 /// WillNotOverflowSignedAdd - Return true if we can prove that:
2072 /// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
2073 /// This basically requires proving that the add in the original type would not
2074 /// overflow to change the sign bit or have a carry out.
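///
/// Illustrative example: if both operands are i32 values known to have at
/// least two sign bits (i.e. each lies in [-2^30, 2^30-1]), their sum stays
/// within the signed i32 range, so no signed overflow is possible.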
2075 bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
2076 // There are different heuristics we can use for this. Here are some simple
2079 // Add has the property that adding any two 2's complement numbers can only
2080 // have one carry bit which can change a sign. As such, if LHS and RHS each
2081 // have at least two sign bits, we know that the addition of the two values will
2082 // sign extend fine.
2083 if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
2087 // If one of the operands only has one non-zero bit, and if the other operand
2088 // has a known-zero bit in a more significant place than it (not including the
2089 // sign bit) the ripple may go up to and fill the zero, but won't change the
2090 // sign. For example, (X & ~4) + 1.
2098 Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
2099 bool Changed = SimplifyCommutative(I);
2100 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2102 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2103 // X + undef -> undef
2104 if (isa<UndefValue>(RHS))
2105 return ReplaceInstUsesWith(I, RHS);
2108 if (RHSC->isNullValue())
2109 return ReplaceInstUsesWith(I, LHS);
2111 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
2112 // X + (signbit) --> X ^ signbit
2113 const APInt& Val = CI->getValue();
2114 uint32_t BitWidth = Val.getBitWidth();
2115 if (Val == APInt::getSignBit(BitWidth))
2116 return BinaryOperator::CreateXor(LHS, RHS);
2118 // See if SimplifyDemandedBits can simplify this. This handles stuff like
2119 // (X & 254)+1 -> (X&254)|1
2120 if (SimplifyDemandedInstructionBits(I))
2123 // zext(bool) + C -> bool ? C + 1 : C
2124 if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
2125 if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
2126 return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
2129 if (isa<PHINode>(LHS))
2130 if (Instruction *NV = FoldOpIntoPhi(I))
2133 ConstantInt *XorRHS = 0;
2135 if (isa<ConstantInt>(RHSC) &&
2136 match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
2137 uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
2138 const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
2140 uint32_t Size = TySizeBits / 2;
2141 APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
2142 APInt CFF80Val(-C0080Val);
2144 if (TySizeBits > Size) {
2145 // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
2146 // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
2147 if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
2148 (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
2149 // This is a sign extend if the top bits are known zero.
2150 if (!MaskedValueIsZero(XorLHS,
2151 APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
2152 Size = 0; // Not a sign ext, but can't be any others either.
2157 C0080Val = APIntOps::lshr(C0080Val, Size);
2158 CFF80Val = APIntOps::ashr(CFF80Val, Size);
2159 } while (Size >= 1);
2161 // FIXME: This shouldn't be necessary. When the backends can handle types
2162 // with funny bit widths then this switch statement should be removed. It
2163 // is just here to get the size of the "middle" type back up to something
2164 // that the back ends can handle.
2165 const Type *MiddleType = 0;
2168 case 32: MiddleType = Type::getInt32Ty(*Context); break;
2169 case 16: MiddleType = Type::getInt16Ty(*Context); break;
2170 case 8: MiddleType = Type::getInt8Ty(*Context); break;
2173 Value *NewTrunc = Builder->CreateTrunc(XorLHS, MiddleType, "sext");
2174 return new SExtInst(NewTrunc, I.getType(), I.getName());
2179 if (I.getType() == Type::getInt1Ty(*Context))
2180 return BinaryOperator::CreateXor(LHS, RHS);
2183 if (I.getType()->isInteger()) {
2184 if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS)))
2187 if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
2188 if (RHSI->getOpcode() == Instruction::Sub)
2189 if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B
2190 return ReplaceInstUsesWith(I, RHSI->getOperand(0));
2192 if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
2193 if (LHSI->getOpcode() == Instruction::Sub)
2194 if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B
2195 return ReplaceInstUsesWith(I, LHSI->getOperand(0));
2200 // -A + -B --> -(A + B)
2201 if (Value *LHSV = dyn_castNegVal(LHS)) {
2202 if (LHS->getType()->isIntOrIntVector()) {
2203 if (Value *RHSV = dyn_castNegVal(RHS)) {
2204 Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
2205 return BinaryOperator::CreateNeg(NewAdd);
2209 return BinaryOperator::CreateSub(RHS, LHSV);
2213 if (!isa<Constant>(RHS))
2214 if (Value *V = dyn_castNegVal(RHS))
2215 return BinaryOperator::CreateSub(LHS, V);
2219 if (Value *X = dyn_castFoldableMul(LHS, C2)) {
2220 if (X == RHS) // X*C + X --> X * (C+1)
2221 return BinaryOperator::CreateMul(RHS, AddOne(C2));
2223 // X*C1 + X*C2 --> X * (C1+C2)
2225 if (X == dyn_castFoldableMul(RHS, C1))
2226 return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
2229 // X + X*C --> X * (C+1)
2230 if (dyn_castFoldableMul(RHS, C2) == LHS)
2231 return BinaryOperator::CreateMul(LHS, AddOne(C2));
2233 // X + ~X --> -1 since ~X = -X-1
2234 if (dyn_castNotVal(LHS) == RHS ||
2235 dyn_castNotVal(RHS) == LHS)
2236 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2239 // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
2240 if (match(RHS, m_And(m_Value(), m_ConstantInt(C2))))
2241 if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2)))
2244 // A+B --> A|B iff A and B have no bits set in common.
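// Illustrative example: (X & 0xF0) + (Y & 0x0F) --> (X & 0xF0) | (Y & 0x0F),
// since the known-zero bits prove the two addends share no set bits.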
2245 if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
2246 APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
2247 APInt LHSKnownOne(IT->getBitWidth(), 0);
2248 APInt LHSKnownZero(IT->getBitWidth(), 0);
2249 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
2250 if (LHSKnownZero != 0) {
2251 APInt RHSKnownOne(IT->getBitWidth(), 0);
2252 APInt RHSKnownZero(IT->getBitWidth(), 0);
2253 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
2255 // No bits in common -> bitwise or.
2256 if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
2257 return BinaryOperator::CreateOr(LHS, RHS);
2261 // W*X + Y*Z --> W * (X+Z) iff W == Y
2262 if (I.getType()->isIntOrIntVector()) {
2263 Value *W, *X, *Y, *Z;
2264 if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
2265 match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
2269 } else if (Y == X) {
2271 } else if (X == Z) {
2278 Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
2279 return BinaryOperator::CreateMul(W, NewAdd);
2284 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
2286 if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
2287 return BinaryOperator::CreateSub(SubOne(CRHS), X);
2289 // (X & FF00) + xx00 -> (X+xx00) & FF00
2290 if (LHS->hasOneUse() &&
2291 match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
2292 Constant *Anded = ConstantExpr::getAnd(CRHS, C2);
2293 if (Anded == CRHS) {
2294 // See if all bits from the first bit set in the Add RHS up are included
2295 // in the mask. First, get the rightmost bit.
2296 const APInt& AddRHSV = CRHS->getValue();
2298 // Form a mask of all bits from the lowest bit added through the top.
2299 APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
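// Illustrative example: if the add RHS is 0x0300, (AddRHSV & -AddRHSV) is
// 0x0100 (the lowest set bit), so AddRHSHighBits is ~0x00FF, i.e. every bit
// from bit 8 upward; the and-mask must cover all of those bits.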
2301 // See if the and mask includes all of these bits.
2302 APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
2304 if (AddRHSHighBits == AddRHSHighBitsAnd) {
2305 // Okay, the xform is safe. Insert the new add pronto.
2306 Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
2307 return BinaryOperator::CreateAnd(NewAdd, C2);
2312 // Try to fold constant add into select arguments.
2313 if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
2314 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2318 // add (select X 0 (sub n A)) A --> select X A n
2320 SelectInst *SI = dyn_cast<SelectInst>(LHS);
2323 SI = dyn_cast<SelectInst>(RHS);
2326 if (SI && SI->hasOneUse()) {
2327 Value *TV = SI->getTrueValue();
2328 Value *FV = SI->getFalseValue();
2331 // Can we fold the add into the argument of the select?
2332 // We check both true and false select arguments for a matching subtract.
2333 if (match(FV, m_Zero()) &&
2334 match(TV, m_Sub(m_Value(N), m_Specific(A))))
2335 // Fold the add into the true select value.
2336 return SelectInst::Create(SI->getCondition(), N, A);
2337 if (match(TV, m_Zero()) &&
2338 match(FV, m_Sub(m_Value(N), m_Specific(A))))
2339 // Fold the add into the false select value.
2340 return SelectInst::Create(SI->getCondition(), A, N);
2344 // Check for (add (sext x), y), see if we can merge this into an
2345 // integer add followed by a sext.
2346 if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
2347 // (add (sext x), cst) --> (sext (add x, cst'))
2348 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2350 ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
2351 if (LHSConv->hasOneUse() &&
2352 ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
2353 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2354 // Insert the new, smaller add.
2355 Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
2357 return new SExtInst(NewAdd, I.getType());
2361 // (add (sext x), (sext y)) --> (sext (add int x, y))
2362 if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
2363 // Only do this if x/y have the same type, if at least one of them has a
2364 // single use (so we don't increase the number of sexts), and if the
2365 // integer add will not overflow.
2366 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2367 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2368 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2369 RHSConv->getOperand(0))) {
2370 // Insert the new integer add.
2371 Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
2372 RHSConv->getOperand(0), "addconv");
2373 return new SExtInst(NewAdd, I.getType());
2378 return Changed ? &I : 0;
2381 Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
2382 bool Changed = SimplifyCommutative(I);
2383 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2385 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2387 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
2388 if (CFP->isExactlyValue(ConstantFP::getNegativeZero
2389 (I.getType())->getValueAPF()))
2390 return ReplaceInstUsesWith(I, LHS);
2393 if (isa<PHINode>(LHS))
2394 if (Instruction *NV = FoldOpIntoPhi(I))
2399 // -A + -B --> -(A + B)
2400 if (Value *LHSV = dyn_castFNegVal(LHS))
2401 return BinaryOperator::CreateFSub(RHS, LHSV);
2404 if (!isa<Constant>(RHS))
2405 if (Value *V = dyn_castFNegVal(RHS))
2406 return BinaryOperator::CreateFSub(LHS, V);
2408 // Check for X+0.0. Simplify it to X if we know X is not -0.0.
2409 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
2410 if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
2411 return ReplaceInstUsesWith(I, LHS);
2413 // Check for (add double (sitofp x), y), see if we can merge this into an
2414 // integer add followed by a promotion.
2415 if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
2416 // (add double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
2417 // ... if the constant fits in the integer value. This is useful for things
2418 // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
2419 // requires a constant pool load, and generally allows the add to be better
2420 // codegen'd.
2421 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
2423 ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
2424 if (LHSConv->hasOneUse() &&
2425 ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
2426 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2427 // Insert the new integer add.
2428 Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
2430 return new SIToFPInst(NewAdd, I.getType());
2434 // (add double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
2435 if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
2436 // Only do this if x/y have the same type, if at least one of them has a
2437 // single use (so we don't increase the number of int->fp conversions),
2438 // and if the integer add will not overflow.
2439 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2440 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2441 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2442 RHSConv->getOperand(0))) {
2443 // Insert the new integer add.
2444 Value *NewAdd = Builder->CreateAdd(LHSConv->getOperand(0),
2445 RHSConv->getOperand(0), "addconv");
2446 return new SIToFPInst(NewAdd, I.getType());
2451 return Changed ? &I : 0;
2454 Instruction *InstCombiner::visitSub(BinaryOperator &I) {
2455 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2457 if (Op0 == Op1) // sub X, X -> 0
2458 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2460 // If this is a 'B = x-(-A)', change to B = x+A...
2461 if (Value *V = dyn_castNegVal(Op1))
2462 return BinaryOperator::CreateAdd(Op0, V);
2464 if (isa<UndefValue>(Op0))
2465 return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
2466 if (isa<UndefValue>(Op1))
2467 return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
2469 if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
2470 // Replace (-1 - A) with (~A)...
2471 if (C->isAllOnesValue())
2472 return BinaryOperator::CreateNot(Op1);
2474 // C - ~X == X + (1+C)
2476 if (match(Op1, m_Not(m_Value(X))))
2477 return BinaryOperator::CreateAdd(X, AddOne(C));
2479 // -(X >>u 31) -> (X >>s 31)
2480 // -(X >>s 31) -> (X >>u 31)
2482 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
2483 if (SI->getOpcode() == Instruction::LShr) {
2484 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2485 // Check to see if we are shifting out everything but the sign bit.
2486 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2487 SI->getType()->getPrimitiveSizeInBits()-1) {
2488 // Ok, the transformation is safe. Insert AShr.
2489 return BinaryOperator::Create(Instruction::AShr,
2490 SI->getOperand(0), CU, SI->getName());
2494 else if (SI->getOpcode() == Instruction::AShr) {
2495 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2496 // Check to see if we are shifting out everything but the sign bit.
2497 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2498 SI->getType()->getPrimitiveSizeInBits()-1) {
2499 // Ok, the transformation is safe. Insert LShr.
2500 return BinaryOperator::CreateLShr(
2501 SI->getOperand(0), CU, SI->getName());
2508 // Try to fold constant sub into select arguments.
2509 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2510 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2513 // C - zext(bool) -> bool ? C - 1 : C
2514 if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
2515 if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
2516 return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
2519 if (I.getType() == Type::getInt1Ty(*Context))
2520 return BinaryOperator::CreateXor(Op0, Op1);
2522 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
2523 if (Op1I->getOpcode() == Instruction::Add) {
2524 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
2525 return BinaryOperator::CreateNeg(Op1I->getOperand(1),
2527 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
2528 return BinaryOperator::CreateNeg(Op1I->getOperand(0),
2530 else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
2531 if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
2532 // C1-(X+C2) --> (C1-C2)-X
2533 return BinaryOperator::CreateSub(
2534 ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0));
2538 if (Op1I->hasOneUse()) {
2539 // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
2540 // is not used by anyone else...
2542 if (Op1I->getOpcode() == Instruction::Sub) {
2543 // Swap the two operands of the subexpr...
2544 Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
2545 Op1I->setOperand(0, IIOp1);
2546 Op1I->setOperand(1, IIOp0);
2548 // Create the new top level add instruction...
2549 return BinaryOperator::CreateAdd(Op0, Op1);
2552 // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
2554 if (Op1I->getOpcode() == Instruction::And &&
2555 (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
2556 Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);
2558 Value *NewNot = Builder->CreateNot(OtherOp, "B.not");
2559 return BinaryOperator::CreateAnd(Op0, NewNot);
2562 // 0 - (X sdiv C) -> (X sdiv -C)
2563 if (Op1I->getOpcode() == Instruction::SDiv)
2564 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
2566 if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
2567 return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
2568 ConstantExpr::getNeg(DivRHS));
2570 // X - X*C --> X * (1-C)
2571 ConstantInt *C2 = 0;
2572 if (dyn_castFoldableMul(Op1I, C2) == Op0) {
2574 ConstantExpr::getSub(ConstantInt::get(I.getType(), 1),
2576 return BinaryOperator::CreateMul(Op0, CP1);
2581 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2582 if (Op0I->getOpcode() == Instruction::Add) {
2583 if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X
2584 return ReplaceInstUsesWith(I, Op0I->getOperand(1));
2585 else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X
2586 return ReplaceInstUsesWith(I, Op0I->getOperand(0));
2587 } else if (Op0I->getOpcode() == Instruction::Sub) {
2588 if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y
2589 return BinaryOperator::CreateNeg(Op0I->getOperand(1),
2595 if (Value *X = dyn_castFoldableMul(Op0, C1)) {
2596 if (X == Op1) // X*C - X --> X * (C-1)
2597 return BinaryOperator::CreateMul(Op1, SubOne(C1));
2599 ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2)
2600 if (X == dyn_castFoldableMul(Op1, C2))
2601 return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
2606 Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
2607 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2609 // If this is a 'B = x-(-A)', change to B = x+A...
2610 if (Value *V = dyn_castFNegVal(Op1))
2611 return BinaryOperator::CreateFAdd(Op0, V);
2613 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
2614 if (Op1I->getOpcode() == Instruction::FAdd) {
2615 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
2616 return BinaryOperator::CreateFNeg(Op1I->getOperand(1),
2618 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
2619 return BinaryOperator::CreateFNeg(Op1I->getOperand(0),
2627 /// isSignBitCheck - Given an exploded icmp instruction, return true if the
2628 /// comparison only checks the sign bit. If it only checks the sign bit, set
2629 /// TrueIfSigned if the result of the comparison is true when the input value is
2630 /// negative.
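///
/// Illustrative example: (icmp slt X, 0) and (icmp ugt X, 0x7FFFFFFF) for an
/// i32 X are both true exactly when X's sign bit is set.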
2631 static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
2632 bool &TrueIfSigned) {
2634 case ICmpInst::ICMP_SLT: // True if LHS s< 0
2635 TrueIfSigned = true;
2636 return RHS->isZero();
2637 case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
2638 TrueIfSigned = true;
2639 return RHS->isAllOnesValue();
2640 case ICmpInst::ICMP_SGT: // True if LHS s> -1
2641 TrueIfSigned = false;
2642 return RHS->isAllOnesValue();
2643 case ICmpInst::ICMP_UGT:
2644 // True if LHS u> RHS and RHS == high-bit-mask - 1
2645 TrueIfSigned = true;
2646 return RHS->getValue() ==
2647 APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
2648 case ICmpInst::ICMP_UGE:
2649 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
2650 TrueIfSigned = true;
2651 return RHS->getValue().isSignBit();
2657 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
2658 bool Changed = SimplifyCommutative(I);
2659 Value *Op0 = I.getOperand(0);
2661 if (isa<UndefValue>(I.getOperand(1))) // undef * X -> 0
2662 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2664 // Simplify mul instructions with a constant RHS.
2665 if (Constant *Op1 = dyn_cast<Constant>(I.getOperand(1))) {
2666 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2668 // ((X << C1)*C2) == (X * (C2 << C1))
2669 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
2670 if (SI->getOpcode() == Instruction::Shl)
2671 if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
2672 return BinaryOperator::CreateMul(SI->getOperand(0),
2673 ConstantExpr::getShl(CI, ShOp));
2676 return ReplaceInstUsesWith(I, Op1); // X * 0 == 0
2677 if (CI->equalsInt(1)) // X * 1 == X
2678 return ReplaceInstUsesWith(I, Op0);
2679 if (CI->isAllOnesValue()) // X * -1 == 0 - X
2680 return BinaryOperator::CreateNeg(Op0, I.getName());
2682 const APInt& Val = cast<ConstantInt>(CI)->getValue();
2683 if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C
2684 return BinaryOperator::CreateShl(Op0,
2685 ConstantInt::get(Op0->getType(), Val.logBase2()));
2687 } else if (isa<VectorType>(Op1->getType())) {
2688 if (Op1->isNullValue())
2689 return ReplaceInstUsesWith(I, Op1);
2691 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
2692 if (Op1V->isAllOnesValue()) // X * -1 == 0 - X
2693 return BinaryOperator::CreateNeg(Op0, I.getName());
2695 // As above, vector X*splat(1.0) -> X in all defined cases.
2696 if (Constant *Splat = Op1V->getSplatValue()) {
2697 if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
2698 if (CI->equalsInt(1))
2699 return ReplaceInstUsesWith(I, Op0);
2704 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
2705 if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
2706 isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1)) {
2707 // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
2708 Value *Add = Builder->CreateMul(Op0I->getOperand(0), Op1, "tmp");
2709 Value *C1C2 = Builder->CreateMul(Op1, Op0I->getOperand(1));
2710 return BinaryOperator::CreateAdd(Add, C1C2);
2714 // Try to fold constant mul into select arguments.
2715 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2716 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2719 if (isa<PHINode>(Op0))
2720 if (Instruction *NV = FoldOpIntoPhi(I))
2724 if (Value *Op0v = dyn_castNegVal(Op0)) // -X * -Y = X*Y
2725 if (Value *Op1v = dyn_castNegVal(I.getOperand(1)))
2726 return BinaryOperator::CreateMul(Op0v, Op1v);
2728 // (X / Y) * Y = X - (X % Y)
2729 // (X / Y) * -Y = (X % Y) - X
2731 Value *Op1 = I.getOperand(1);
2732 BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
2734 (BO->getOpcode() != Instruction::UDiv &&
2735 BO->getOpcode() != Instruction::SDiv)) {
2737 BO = dyn_cast<BinaryOperator>(I.getOperand(1));
2739 Value *Neg = dyn_castNegVal(Op1);
2740 if (BO && BO->hasOneUse() &&
2741 (BO->getOperand(1) == Op1 || BO->getOperand(1) == Neg) &&
2742 (BO->getOpcode() == Instruction::UDiv ||
2743 BO->getOpcode() == Instruction::SDiv)) {
2744 Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);
2746 // If the division is exact, X % Y is zero.
2747 if (SDivOperator *SDiv = dyn_cast<SDivOperator>(BO))
2748 if (SDiv->isExact()) {
2750 return ReplaceInstUsesWith(I, Op0BO);
2752 return BinaryOperator::CreateNeg(Op0BO);
2756 if (BO->getOpcode() == Instruction::UDiv)
2757 Rem = Builder->CreateURem(Op0BO, Op1BO);
2759 Rem = Builder->CreateSRem(Op0BO, Op1BO);
2763 return BinaryOperator::CreateSub(Op0BO, Rem);
2764 return BinaryOperator::CreateSub(Rem, Op0BO);
2768 /// i1 mul -> i1 and.
2769 if (I.getType() == Type::getInt1Ty(*Context))
2770 return BinaryOperator::CreateAnd(Op0, I.getOperand(1));
2772 // X*(1 << Y) --> X << Y
2773 // (1 << Y)*X --> X << Y
2776 if (match(Op0, m_Shl(m_One(), m_Value(Y))))
2777 return BinaryOperator::CreateShl(I.getOperand(1), Y);
2778 if (match(I.getOperand(1), m_Shl(m_One(), m_Value(Y))))
2779 return BinaryOperator::CreateShl(Op0, Y);
2782 // If one of the operands of the multiply is a cast from a boolean value, then
2783 // we know the bool is either zero or one, so this is a 'masking' multiply.
2784 // See if we can simplify things based on how the boolean was originally
2785 // formed.
2786 CastInst *BoolCast = 0;
2787 if (ZExtInst *CI = dyn_cast<ZExtInst>(Op0))
2788 if (CI->getOperand(0)->getType() == Type::getInt1Ty(*Context))
2791 if (ZExtInst *CI = dyn_cast<ZExtInst>(I.getOperand(1)))
2792 if (CI->getOperand(0)->getType() == Type::getInt1Ty(*Context))
2795 if (ICmpInst *SCI = dyn_cast<ICmpInst>(BoolCast->getOperand(0))) {
2796 Value *SCIOp0 = SCI->getOperand(0), *SCIOp1 = SCI->getOperand(1);
2797 const Type *SCOpTy = SCIOp0->getType();
2800 // If the icmp is true iff the sign bit of X is set, then convert this
2801 // multiply into a shift/and combination.
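// Illustrative example (i32):
//   %b = icmp slt i32 %x, 0
//   %z = zext i1 %b to i32
//   %m = mul i32 %z, %y
// becomes, roughly:
//   %s = ashr i32 %x, 31
//   %m = and i32 %s, %y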
2802 if (isa<ConstantInt>(SCIOp1) &&
2803 isSignBitCheck(SCI->getPredicate(), cast<ConstantInt>(SCIOp1), TIS) &&
2805 // Shift the X value right to turn it into "all signbits".
2806 Constant *Amt = ConstantInt::get(SCIOp0->getType(),
2807 SCOpTy->getPrimitiveSizeInBits()-1);
2808 Value *V = Builder->CreateAShr(SCIOp0, Amt,
2809 BoolCast->getOperand(0)->getName()+".mask");
2811 // If the multiply type is not the same as the source type, sign extend
2812 // or truncate to the multiply type.
2813 if (I.getType() != V->getType())
2814 V = Builder->CreateIntCast(V, I.getType(), true);
2816 Value *OtherOp = Op0 == BoolCast ? I.getOperand(1) : Op0;
2817 return BinaryOperator::CreateAnd(V, OtherOp);
2822 return Changed ? &I : 0;
2825 Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
2826 bool Changed = SimplifyCommutative(I);
2827 Value *Op0 = I.getOperand(0);
2829 // Simplify mul instructions with a constant RHS...
2830 if (Constant *Op1 = dyn_cast<Constant>(I.getOperand(1))) {
2831 if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1)) {
2832 // "In IEEE floating point, x*1 is not equivalent to x for nans. However,
2833 // ANSI says we can drop signals, so we can do this anyway." (from GCC)
2834 if (Op1F->isExactlyValue(1.0))
2835 return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0'
2836 } else if (isa<VectorType>(Op1->getType())) {
2837 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
2838 // As above, vector X*splat(1.0) -> X in all defined cases.
2839 if (Constant *Splat = Op1V->getSplatValue()) {
2840 if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
2841 if (F->isExactlyValue(1.0))
2842 return ReplaceInstUsesWith(I, Op0);
2847 // Try to fold constant mul into select arguments.
2848 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2849 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2852 if (isa<PHINode>(Op0))
2853 if (Instruction *NV = FoldOpIntoPhi(I))
2857 if (Value *Op0v = dyn_castFNegVal(Op0)) // -X * -Y = X*Y
2858 if (Value *Op1v = dyn_castFNegVal(I.getOperand(1)))
2859 return BinaryOperator::CreateFMul(Op0v, Op1v);
2861 return Changed ? &I : 0;
2864 /// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
2866 bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
2867 SelectInst *SI = cast<SelectInst>(I.getOperand(1));
2869 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
2870 int NonNullOperand = -1;
2871 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
2872 if (ST->isNullValue())
2874 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
2875 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
2876 if (ST->isNullValue())
2879 if (NonNullOperand == -1)
2882 Value *SelectCond = SI->getOperand(0);
2884 // Change the div/rem to use 'Y' instead of the select.
2885 I.setOperand(1, SI->getOperand(NonNullOperand));
2887 // Okay, we know we can replace the operand of the div/rem with 'Y' with no
2888 // problem. However, the select, or the condition of the select may have
2889 // multiple uses. Based on our knowledge that the operand must be non-zero,
2890 // propagate the known value for the select into other uses of it, and
2891 // propagate a known value of the condition into its other users.
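// Illustrative example: in 'udiv %x, (select %c, %y, 0)' the divisor must be
// %y (a zero divisor is undefined), so other uses of the select in this block
// can become %y and uses of %c can become 'true'.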
2893 // If the select and condition only have a single use, don't bother with this,
2895 if (SI->use_empty() && SelectCond->hasOneUse())
2898 // Scan the current block backward, looking for other uses of SI.
2899 BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();
2901 while (BBI != BBFront) {
2903 // If we found a call to a function, we can't assume it will return, so
2904 // information from below it cannot be propagated above it.
2905 if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
2908 // Replace uses of the select or its condition with the known values.
2909 for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
2912 *I = SI->getOperand(NonNullOperand);
2914 } else if (*I == SelectCond) {
2915 *I = NonNullOperand == 1 ? ConstantInt::getTrue(*Context) :
2916 ConstantInt::getFalse(*Context);
2921 // If we pass the instruction, quit looking for it.
2924 if (&*BBI == SelectCond)
2927 // If we ran out of things to eliminate, break out of the loop.
2928 if (SelectCond == 0 && SI == 0)
2936 /// This function implements the transforms on div instructions that work
2937 /// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
2938 /// used by the visitors to those instructions.
2939 /// @brief Transforms common to all three div instructions
2940 Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
2941 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2943 // undef / X -> 0 for integer.
2944 // undef / X -> undef for FP (the undef could be a snan).
2945 if (isa<UndefValue>(Op0)) {
2946 if (Op0->getType()->isFPOrFPVector())
2947 return ReplaceInstUsesWith(I, Op0);
2948 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2951 // X / undef -> undef
2952 if (isa<UndefValue>(Op1))
2953 return ReplaceInstUsesWith(I, Op1);
2958 /// This function implements the transforms common to both integer division
2959 /// instructions (udiv and sdiv). It is called by the visitors to those integer
2960 /// division instructions.
2961 /// @brief Common integer divide transforms
2962 Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
2963 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2965 // (sdiv X, X) --> 1 (udiv X, X) --> 1
2967 if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
2968 Constant *CI = ConstantInt::get(Ty->getElementType(), 1);
2969 std::vector<Constant*> Elts(Ty->getNumElements(), CI);
2970 return ReplaceInstUsesWith(I, ConstantVector::get(Elts));
2973 Constant *CI = ConstantInt::get(I.getType(), 1);
2974 return ReplaceInstUsesWith(I, CI);
2977 if (Instruction *Common = commonDivTransforms(I))
2980 // Handle cases involving: [su]div X, (select Cond, Y, Z)
2981 // This does not apply for fdiv.
2982 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
2985 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2987 if (RHS->equalsInt(1))
2988 return ReplaceInstUsesWith(I, Op0);
2990 // (X / C1) / C2 -> X / (C1*C2)
2991 if (Instruction *LHS = dyn_cast<Instruction>(Op0))
2992 if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
2993 if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
2994 if (MultiplyOverflows(RHS, LHSRHS,
2995 I.getOpcode()==Instruction::SDiv))
2996 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2998 return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
2999 ConstantExpr::getMul(RHS, LHSRHS));
3002 if (!RHS->isZero()) { // avoid X udiv 0
3003 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3004 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3006 if (isa<PHINode>(Op0))
3007 if (Instruction *NV = FoldOpIntoPhi(I))
3012 // 0 / X == 0, we don't need to preserve faults!
3013 if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
3014 if (LHS->equalsInt(0))
3015 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3017 // It can't be division by zero, hence it must be division by one.
3018 if (I.getType() == Type::getInt1Ty(*Context))
3019 return ReplaceInstUsesWith(I, Op0);
3021 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
3022 if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
3025 return ReplaceInstUsesWith(I, Op0);
3031 Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
3032 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3034 // Handle the integer div common cases
3035 if (Instruction *Common = commonIDivTransforms(I))
3038 if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
3039 // X udiv 2^C -> X >> C
3040 // Check to see if this is an unsigned division with an exact power of 2,
3041 // if so, convert to a right shift.
3042 if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2
3043 return BinaryOperator::CreateLShr(Op0,
3044 ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
3046 // X udiv C, where C >= signbit
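// The quotient can then only be 0 or 1, so this is lowered to
// (X u< C) ? 0 : 1 below.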
3047 if (C->getValue().isNegative()) {
3048 Value *IC = Builder->CreateICmpULT( Op0, C);
3049 return SelectInst::Create(IC, Constant::getNullValue(I.getType()),
3050 ConstantInt::get(I.getType(), 1));
3054 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
3055 if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
3056 if (RHSI->getOpcode() == Instruction::Shl &&
3057 isa<ConstantInt>(RHSI->getOperand(0))) {
3058 const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
3059 if (C1.isPowerOf2()) {
3060 Value *N = RHSI->getOperand(1);
3061 const Type *NTy = N->getType();
3062 if (uint32_t C2 = C1.logBase2())
3063 N = Builder->CreateAdd(N, ConstantInt::get(NTy, C2), "tmp");
3064 return BinaryOperator::CreateLShr(Op0, N);
3069 // udiv X, (Select Cond, C1, C2) --> Select Cond, (X >> log2(C1)), (X >> log2(C2))
3070 // where C1 and C2 are powers of two.
3071 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
3072 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3073 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3074 const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
3075 if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
3076 // Compute the shift amounts
3077 uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
3078 // Construct the "on true" case of the select
3079 Constant *TC = ConstantInt::get(Op0->getType(), TSA);
3080 Value *TSI = Builder->CreateLShr(Op0, TC, SI->getName()+".t");
3082 // Construct the "on false" case of the select
3083 Constant *FC = ConstantInt::get(Op0->getType(), FSA);
3084 Value *FSI = Builder->CreateLShr(Op0, FC, SI->getName()+".f");
3086 // Construct the select instruction and return it.
3087 return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
3093 Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
3094 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3096 // Handle the integer div common cases
3097 if (Instruction *Common = commonIDivTransforms(I))
3100 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3102 if (RHS->isAllOnesValue())
3103 return BinaryOperator::CreateNeg(Op0);
3105 // sdiv X, C --> ashr X, log2(C)
3106 if (cast<SDivOperator>(&I)->isExact() &&
3107 RHS->getValue().isNonNegative() &&
3108 RHS->getValue().isPowerOf2()) {
3109 Value *ShAmt = llvm::ConstantInt::get(RHS->getType(),
3110 RHS->getValue().exactLogBase2());
3111 return BinaryOperator::CreateAShr(Op0, ShAmt, I.getName());
3114 // -X/C --> X/-C provided the negation doesn't overflow.
3115 if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
3116 if (isa<Constant>(Sub->getOperand(0)) &&
3117 cast<Constant>(Sub->getOperand(0))->isNullValue() &&
3118 Sub->hasNoSignedWrap())
3119 return BinaryOperator::CreateSDiv(Sub->getOperand(1),
3120 ConstantExpr::getNeg(RHS));
3123 // If the sign bits of both operands are zero (i.e. we can prove they are
3124 // unsigned inputs), turn this into a udiv.
3125 if (I.getType()->isInteger()) {
3126 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3127 if (MaskedValueIsZero(Op0, Mask)) {
3128 if (MaskedValueIsZero(Op1, Mask)) {
3129 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
3130 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
3132 ConstantInt *ShiftedInt;
3133 if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value())) &&
3134 ShiftedInt->getValue().isPowerOf2()) {
3135 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
3136 // Safe because the only negative value (1 << Y) can take on is
3137 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
3138 // the sign bit set.
3139 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
3147 Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
3148 return commonDivTransforms(I);
3151 /// This function implements the transforms on rem instructions that work
3152 /// regardless of the kind of rem instruction it is (urem, srem, or frem). It
3153 /// is used by the visitors to those instructions.
3154 /// @brief Transforms common to all three rem instructions
3155 Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
3156 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3158 if (isa<UndefValue>(Op0)) { // undef % X -> 0
3159 if (I.getType()->isFPOrFPVector())
3160 return ReplaceInstUsesWith(I, Op0); // undef % X -> undef (could be SNaN)
3161 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3163 if (isa<UndefValue>(Op1))
3164 return ReplaceInstUsesWith(I, Op1); // X % undef -> undef
3166 // Handle cases involving: rem X, (select Cond, Y, Z)
3167 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
3173 /// This function implements the transforms common to both integer remainder
3174 /// instructions (urem and srem). It is called by the visitors to those integer
3175 /// remainder instructions.
3176 /// @brief Common integer remainder transforms
3177 Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
3178 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3180 if (Instruction *common = commonRemTransforms(I))
3183 // 0 % X == 0 for integer, we don't need to preserve faults!
3184 if (Constant *LHS = dyn_cast<Constant>(Op0))
3185 if (LHS->isNullValue())
3186 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3188 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3189 // X % 0 == undef, we don't need to preserve faults!
3190 if (RHS->equalsInt(0))
3191 return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
3193 if (RHS->equalsInt(1)) // X % 1 == 0
3194 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3196 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
3197 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
3198 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3200 } else if (isa<PHINode>(Op0I)) {
3201 if (Instruction *NV = FoldOpIntoPhi(I))
3205 // See if we can fold away this rem instruction.
3206 if (SimplifyDemandedInstructionBits(I))
3214 Instruction *InstCombiner::visitURem(BinaryOperator &I) {
3215 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3217 if (Instruction *common = commonIRemTransforms(I))
3220 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3221 // X urem 2^k --> X & (2^k - 1), e.g. urem X, 8 --> and X, 7
3222 // Check to see if this is an unsigned remainder with an exact power of 2,
3223 // if so, convert to a bitwise and.
3224 if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
3225 if (C->getValue().isPowerOf2())
3226 return BinaryOperator::CreateAnd(Op0, SubOne(C));
3229 if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
3230 // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
3231 if (RHSI->getOpcode() == Instruction::Shl &&
3232 isa<ConstantInt>(RHSI->getOperand(0))) {
3233 if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
3234 Constant *N1 = Constant::getAllOnesValue(I.getType());
3235 Value *Add = Builder->CreateAdd(RHSI, N1, "tmp");
3236 return BinaryOperator::CreateAnd(Op0, Add);
3241 // urem X, (select Cond, C1, C2) --> select Cond, (and X, C1-1), (and X, C2-1)
3242 // where C1 and C2 are powers of two.
3243 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
3244 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3245 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3246 // STO == 0 and SFO == 0 handled above.
3247 if ((STO->getValue().isPowerOf2()) &&
3248 (SFO->getValue().isPowerOf2())) {
3249 Value *TrueAnd = Builder->CreateAnd(Op0, SubOne(STO),
3250 SI->getName()+".t");
3251 Value *FalseAnd = Builder->CreateAnd(Op0, SubOne(SFO),
3252 SI->getName()+".f");
3253 return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
3261 Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
3262 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3264 // Handle the integer rem common cases
3265 if (Instruction *Common = commonIRemTransforms(I))
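3266 // X srem -Y --> X srem Y: the remainder takes the sign of the dividend,
3267 // so a negated divisor can safely be replaced by its operand.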
3268 if (Value *RHSNeg = dyn_castNegVal(Op1))
3269 if (!isa<Constant>(RHSNeg) ||
3270 (isa<ConstantInt>(RHSNeg) &&
3271 cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
3273 Worklist.AddValue(I.getOperand(1));
3274 I.setOperand(1, RHSNeg);
3278 // If the sign bits of both operands are zero (i.e. we can prove they are
3279 // unsigned inputs), turn this into a urem.
3280 if (I.getType()->isInteger()) {
3281 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3282 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
3283 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
3284 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
3288 // If it's a constant vector, flip any negative values positive.
3289 if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
3290 unsigned VWidth = RHSV->getNumOperands();
3292 bool hasNegative = false;
3293 for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
3294 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
3295 if (RHS->getValue().isNegative())
3299 std::vector<Constant *> Elts(VWidth);
3300 for (unsigned i = 0; i != VWidth; ++i) {
3301 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
3302 if (RHS->getValue().isNegative())
3303 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
3309 Constant *NewRHSV = ConstantVector::get(Elts);
3310 if (NewRHSV != RHSV) {
3311 Worklist.AddValue(I.getOperand(1));
3312 I.setOperand(1, NewRHSV);
3321 Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
3322 return commonRemTransforms(I);
3325 // isOneBitSet - Return true if there is exactly one bit set in the specified
3326 // constant.
3327 static bool isOneBitSet(const ConstantInt *CI) {
3328 return CI->getValue().isPowerOf2();
3331 // isHighOnes - Return true if the constant is of the form 1+0+.
3332 // This is the same as lowones(~X).
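// For example, 0xFF000000 and 0xFFF00000 are high-ones; 0x00FF0000 is not.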
3333 static bool isHighOnes(const ConstantInt *CI) {
3334 return (~CI->getValue() + 1).isPowerOf2();
3337 /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
3338 /// are carefully arranged to allow folding of expressions such as:
3340 /// (A < B) | (A > B) --> (A != B)
3342 /// Note that this is only valid if the first and second predicates have the
3343 /// same sign. It is illegal to do: (A u< B) | (A s> B)
3345 /// Three bits are used to represent the condition, as follows:
3350 /// <=> Value Definition
3351 /// 000 0 Always false
3352 /// 001 1 A > B
3353 /// 010 2 A == B
3354 /// 011 3 A >= B
3355 /// 100 4 A < B
3356 /// 101 5 A != B
3357 /// 110 6 A <= B
3358 /// 111 7 Always true
3360 static unsigned getICmpCode(const ICmpInst *ICI) {
3361 switch (ICI->getPredicate()) {
3363 case ICmpInst::ICMP_UGT: return 1; // 001
3364 case ICmpInst::ICMP_SGT: return 1; // 001
3365 case ICmpInst::ICMP_EQ: return 2; // 010
3366 case ICmpInst::ICMP_UGE: return 3; // 011
3367 case ICmpInst::ICMP_SGE: return 3; // 011
3368 case ICmpInst::ICMP_ULT: return 4; // 100
3369 case ICmpInst::ICMP_SLT: return 4; // 100
3370 case ICmpInst::ICMP_NE: return 5; // 101
3371 case ICmpInst::ICMP_ULE: return 6; // 110
3372 case ICmpInst::ICMP_SLE: return 6; // 110
3375 llvm_unreachable("Invalid ICmp predicate!");
3380 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
3381 /// predicate into a three bit mask. It also returns whether it is an ordered
3382 /// predicate by reference.
3383 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
3386 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
3387 case FCmpInst::FCMP_UNO: return 0; // 000
3388 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
3389 case FCmpInst::FCMP_UGT: return 1; // 001
3390 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
3391 case FCmpInst::FCMP_UEQ: return 2; // 010
3392 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
3393 case FCmpInst::FCMP_UGE: return 3; // 011
3394 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
3395 case FCmpInst::FCMP_ULT: return 4; // 100
3396 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
3397 case FCmpInst::FCMP_UNE: return 5; // 101
3398 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
3399 case FCmpInst::FCMP_ULE: return 6; // 110
3402 // Not expecting FCMP_FALSE or FCMP_TRUE; callers handle those before calling.
3403 llvm_unreachable("Unexpected FCmp predicate!");
3408 /// getICmpValue - This is the complement of getICmpCode, which turns an
3409 /// opcode and two operands into either a constant true or false, or a brand
3410 /// new ICmp instruction. The sign is passed in to determine which kind
3411 /// of predicate to use in the new icmp instruction.
3412 static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
3413 LLVMContext *Context) {
3415 default: llvm_unreachable("Illegal ICmp code!");
3416 case 0: return ConstantInt::getFalse(*Context);
3419 return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS);
3421 return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS);
3422 case 2: return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS);
3425 return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS);
3427 return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS);
3430 return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS);
3432 return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS);
3433 case 5: return new ICmpInst(ICmpInst::ICMP_NE, LHS, RHS);
3436 return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS);
3438 return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS);
3439 case 7: return ConstantInt::getTrue(*Context);
3443 /// getFCmpValue - This is the complement of getFCmpCode, which turns a predicate
3444 /// code and two operands into either a new FCmp instruction or a constant true.
3445 /// isordered is passed in to determine which kind of predicate to use in the new fcmp instruction.
3446 static Value *getFCmpValue(bool isordered, unsigned code,
3447 Value *LHS, Value *RHS, LLVMContext *Context) {
3449 default: llvm_unreachable("Illegal FCmp code!");
3452 return new FCmpInst(FCmpInst::FCMP_ORD, LHS, RHS);
3454 return new FCmpInst(FCmpInst::FCMP_UNO, LHS, RHS);
3457 return new FCmpInst(FCmpInst::FCMP_OGT, LHS, RHS);
3459 return new FCmpInst(FCmpInst::FCMP_UGT, LHS, RHS);
3462 return new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS);
3464 return new FCmpInst(FCmpInst::FCMP_UEQ, LHS, RHS);
3467 return new FCmpInst(FCmpInst::FCMP_OGE, LHS, RHS);
3469 return new FCmpInst(FCmpInst::FCMP_UGE, LHS, RHS);
3472 return new FCmpInst(FCmpInst::FCMP_OLT, LHS, RHS);
3474 return new FCmpInst(FCmpInst::FCMP_ULT, LHS, RHS);
3477 return new FCmpInst(FCmpInst::FCMP_ONE, LHS, RHS);
3479 return new FCmpInst(FCmpInst::FCMP_UNE, LHS, RHS);
3482 return new FCmpInst(FCmpInst::FCMP_OLE, LHS, RHS);
3484 return new FCmpInst(FCmpInst::FCMP_ULE, LHS, RHS);
3485 case 7: return ConstantInt::getTrue(*Context);
3489 /// PredicatesFoldable - Return true if both predicates match sign or if at
3490 /// least one of them is an equality comparison (which is signless).
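/// For example, (slt, sgt) and (eq, sgt) are foldable, while (ult, sgt) is not.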
3491 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
3492 return (ICmpInst::isSignedPredicate(p1) == ICmpInst::isSignedPredicate(p2)) ||
3493 (ICmpInst::isSignedPredicate(p1) && ICmpInst::isEquality(p2)) ||
3494 (ICmpInst::isSignedPredicate(p2) && ICmpInst::isEquality(p1));
3498 // FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3499 struct FoldICmpLogical {
3502 ICmpInst::Predicate pred;
3503 FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
3504 : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
3505 pred(ICI->getPredicate()) {}
3506 bool shouldApply(Value *V) const {
3507 if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
3508 if (PredicatesFoldable(pred, ICI->getPredicate()))
3509 return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
3510 (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
3513 Instruction *apply(Instruction &Log) const {
3514 ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
3515 if (ICI->getOperand(0) != LHS) {
3516 assert(ICI->getOperand(1) == LHS);
3517 ICI->swapOperands(); // Swap the LHS and RHS of the ICmp
3520 ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
3521 unsigned LHSCode = getICmpCode(ICI);
3522 unsigned RHSCode = getICmpCode(RHSICI);
3524 switch (Log.getOpcode()) {
3525 case Instruction::And: Code = LHSCode & RHSCode; break;
3526 case Instruction::Or: Code = LHSCode | RHSCode; break;
3527 case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
3528 default: llvm_unreachable("Illegal logical opcode!"); return 0;
3531 bool isSigned = ICmpInst::isSignedPredicate(RHSICI->getPredicate()) ||
3532 ICmpInst::isSignedPredicate(ICI->getPredicate());
3534 Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext());
3535 if (Instruction *I = dyn_cast<Instruction>(RV))
3537 // Otherwise, it's a constant boolean value...
3538 return IC.ReplaceInstUsesWith(Log, RV);
3541 } // end anonymous namespace
3543 // OptAndOp - This handles expressions of the form ((val OP C1) & C2). Where
3544 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
3545 // guaranteed to be a binary operator.
3546 Instruction *InstCombiner::OptAndOp(Instruction *Op,
3548 ConstantInt *AndRHS,
3549 BinaryOperator &TheAnd) {
3550 Value *X = Op->getOperand(0);
3551 Constant *Together = 0;
3553 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
3555 switch (Op->getOpcode()) {
3556 case Instruction::Xor:
3557 if (Op->hasOneUse()) {
3558 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
3559 Value *And = Builder->CreateAnd(X, AndRHS);
3561 return BinaryOperator::CreateXor(And, Together);
3564 case Instruction::Or:
3565 if (Together == AndRHS) // (X | C) & C --> C
3566 return ReplaceInstUsesWith(TheAnd, AndRHS);
3568 if (Op->hasOneUse() && Together != OpRHS) {
3569 // (X | C1) & C2 --> (X | (C1&C2)) & C2
3570 Value *Or = Builder->CreateOr(X, Together);
3572 return BinaryOperator::CreateAnd(Or, AndRHS);
3575 case Instruction::Add:
3576 if (Op->hasOneUse()) {
3577 // Adding a one to a single bit bit-field should be turned into an XOR
3578 // of the bit. First thing to check is to see if this AND is with a
3579 // single bit constant.
3580 const APInt& AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
3582 // If there is only one bit set...
3583 if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
3584 // Ok, at this point, we know that we are masking the result of the
3585 // ADD down to exactly one bit. If the constant we are adding has
3586 // no bits set below this bit, then we can eliminate the ADD.
3587 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
3589 // Check to see if any bits below the one bit set in AndRHSV are set.
3590 if ((AddRHS & (AndRHSV-1)) == 0) {
3591 // If not, the only thing that can affect the output of the AND is
3592 // the bit specified by AndRHSV. If that bit is set, the effect of
3593 // the XOR is to toggle the bit. If it is clear, then the ADD has
3594 // no effect on the AND, so the ADD can simply be removed.
3595 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
3596 TheAnd.setOperand(0, X);
3599 // Pull the XOR out of the AND.
3600 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
3601 NewAnd->takeName(Op);
3602 return BinaryOperator::CreateXor(NewAnd, AndRHS);
3609 case Instruction::Shl: {
3610 // We know that the AND will not produce any of the bits shifted in, so if
3611 // the anded constant includes them, clear them now!
3613 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3614 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3615 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
3616 ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShlMask);
3618 if (CI->getValue() == ShlMask) {
3619 // Masking out bits that the shift already masks
3620 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
3621 } else if (CI != AndRHS) { // Reducing bits set in and.
3622 TheAnd.setOperand(1, CI);
3627 case Instruction::LShr:
3629 // We know that the AND will not produce any of the bits shifted in, so if
3630 // the anded constant includes them, clear them now! This only applies to
3631 // unsigned shifts, because a signed shr may bring in set bits!
3633 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3634 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3635 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
3636 ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
3638 if (CI->getValue() == ShrMask) {
3639 // Masking out bits that the shift already masks.
3640 return ReplaceInstUsesWith(TheAnd, Op);
3641 } else if (CI != AndRHS) {
3642 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
3647 case Instruction::AShr:
3649 // See if this is shifting in some sign extension, then masking it out
3651 if (Op->hasOneUse()) {
3652 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3653 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3654 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
3655 Constant *C = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
3656 if (C == AndRHS) { // Masking out bits shifted in.
3657 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
3658 // Make the argument unsigned.
3659 Value *ShVal = Op->getOperand(0);
3660 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
3661 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
3670 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
3671 /// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
3672 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
3673 /// whether to treat V, Lo, and Hi as signed or not. IB is the location to
3674 /// insert new instructions.
3675 Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
3676 bool isSigned, bool Inside,
3678 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
3679 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
3680 "Lo is not <= Hi in range emission code!");
3683 if (Lo == Hi) // Trivially false.
3684 return new ICmpInst(ICmpInst::ICMP_NE, V, V);
3686 // V >= Min && V < Hi --> V < Hi
3687 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3688 ICmpInst::Predicate pred = (isSigned ?
3689 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
3690 return new ICmpInst(pred, V, Hi);
3693 // Emit V-Lo <u Hi-Lo
3694 Constant *NegLo = ConstantExpr::getNeg(Lo);
3695 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
3696 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
3697 return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound);
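3699 // Otherwise (Inside is false), emit the complementary test: V < Lo || V >= Hi.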
3700 if (Lo == Hi) // Trivially true.
3701 return new ICmpInst(ICmpInst::ICMP_EQ, V, V);
3703 // V < Min || V >= Hi -> V > Hi-1
3704 Hi = SubOne(cast<ConstantInt>(Hi));
3705 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3706 ICmpInst::Predicate pred = (isSigned ?
3707 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
3708 return new ICmpInst(pred, V, Hi);
3711 // Emit V-Lo >u Hi-1-Lo
3712 // Note that Hi has already had one subtracted from it, above.
3713 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
3714 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
3715 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
3716 return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound);
3719 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
3720 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
3721 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
3722 // not, since all 1s are not contiguous.
3723 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
3724 const APInt& V = Val->getValue();
3725 uint32_t BitWidth = Val->getType()->getBitWidth();
3726 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
3728 // look for the first zero bit after the run of ones
3729 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
3730 // look for the first non-zero bit
3731 ME = V.getActiveBits();
3735 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
3736 /// where isSub determines whether the operator is a sub. If we can fold one of
3737 /// the following xforms:
3739 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
3740 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
3741 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
3743 /// return (A +/- B).
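3744 /// For example, ((A & 255) + B) & 15 --> (A + B) & 15, since 255 & 15 == 15.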
3745 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
3746 ConstantInt *Mask, bool isSub,
3748 Instruction *LHSI = dyn_cast<Instruction>(LHS);
3749 if (!LHSI || LHSI->getNumOperands() != 2 ||
3750 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
3752 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
3754 switch (LHSI->getOpcode()) {
3756 case Instruction::And:
3757 if (ConstantExpr::getAnd(N, Mask) == Mask) {
3758 // If the AndRHS is a power of two minus one (0+1+), this is simple.
3759 if ((Mask->getValue().countLeadingZeros() +
3760 Mask->getValue().countPopulation()) ==
3761 Mask->getValue().getBitWidth())
3764 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
3765 // part, we don't need any explicit masks to take them out of A. If that
3766 // is all N is, ignore it.
3767 uint32_t MB = 0, ME = 0;
3768 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
3769 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
3770 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
3771 if (MaskedValueIsZero(RHS, Mask))
3776 case Instruction::Or:
3777 case Instruction::Xor:
3778 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
3779 if ((Mask->getValue().countLeadingZeros() +
3780 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
3781 && ConstantExpr::getAnd(N, Mask)->isNullValue())
3786 if (isSub)
3787 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
3788 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
3791 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
3792 Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
3793 ICmpInst *LHS, ICmpInst *RHS) {
3795 ConstantInt *LHSCst, *RHSCst;
3796 ICmpInst::Predicate LHSCC, RHSCC;
3798 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
3799 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
3800 m_ConstantInt(LHSCst))) ||
3801 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
3802 m_ConstantInt(RHSCst))))
3805 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
3806 // where C is a power of 2
3807 if (LHSCst == RHSCst && LHSCC == RHSCC && LHSCC == ICmpInst::ICMP_ULT &&
3808 LHSCst->getValue().isPowerOf2()) {
3809 Value *NewOr = Builder->CreateOr(Val, Val2);
3810 return new ICmpInst(LHSCC, NewOr, LHSCst);
3813 // From here on, we only handle:
3814 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
3815 if (Val != Val2) return 0;
3817 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
3818 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
3819 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
3820 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
3821 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
3824 // We can't fold (ugt x, C) & (sgt x, C2).
3825 if (!PredicatesFoldable(LHSCC, RHSCC))
3828 // Ensure that the larger constant is on the RHS.
3830 if (ICmpInst::isSignedPredicate(LHSCC) ||
3831 (ICmpInst::isEquality(LHSCC) &&
3832 ICmpInst::isSignedPredicate(RHSCC)))
3833 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
3835 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
3838 std::swap(LHS, RHS);
3839 std::swap(LHSCst, RHSCst);
3840 std::swap(LHSCC, RHSCC);
3843 // At this point, we know we have two icmp instructions
3844 // comparing a value against two constants and and'ing the result
3845 // together. Because of the above check, we know that we only have
3846 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
3847 // (from the FoldICmpLogical check above) that the two constants are not
3848 // equal and that the larger constant is on the RHS.
3849 assert(LHSCst != RHSCst && "Compares not folded above?");
3852 default: llvm_unreachable("Unknown integer condition code!");
3853 case ICmpInst::ICMP_EQ:
3855 default: llvm_unreachable("Unknown integer condition code!");
3856 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
3857 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
3858 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
3859 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
3860 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
3861 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
3862 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
3863 return ReplaceInstUsesWith(I, LHS);
3865 case ICmpInst::ICMP_NE:
3867 default: llvm_unreachable("Unknown integer condition code!");
3868 case ICmpInst::ICMP_ULT:
3869 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
3870 return new ICmpInst(ICmpInst::ICMP_ULT, Val, LHSCst);
3871 break; // (X != 13 & X u< 15) -> no change
3872 case ICmpInst::ICMP_SLT:
3873 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
3874 return new ICmpInst(ICmpInst::ICMP_SLT, Val, LHSCst);
3875 break; // (X != 13 & X s< 15) -> no change
3876 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
3877 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
3878 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
3879 return ReplaceInstUsesWith(I, RHS);
3880 case ICmpInst::ICMP_NE:
3881 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
3882 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
3883 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
3884 return new ICmpInst(ICmpInst::ICMP_UGT, Add,
3885 ConstantInt::get(Add->getType(), 1));
3887 break; // (X != 13 & X != 15) -> no change
3890 case ICmpInst::ICMP_ULT:
3892 default: llvm_unreachable("Unknown integer condition code!");
3893 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
3894 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
3895 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
3896 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
3898 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
3899 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
3900 return ReplaceInstUsesWith(I, LHS);
3901 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
3905 case ICmpInst::ICMP_SLT:
3907 default: llvm_unreachable("Unknown integer condition code!");
3908 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
3909 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
3910 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
3911 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
3913 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
3914 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
3915 return ReplaceInstUsesWith(I, LHS);
3916 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
3920 case ICmpInst::ICMP_UGT:
3922 default: llvm_unreachable("Unknown integer condition code!");
3923 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
3924 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
3925 return ReplaceInstUsesWith(I, RHS);
3926 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
3928 case ICmpInst::ICMP_NE:
3929 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
3930 return new ICmpInst(LHSCC, Val, RHSCst);
3931 break; // (X u> 13 & X != 15) -> no change
3932 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
3933 return InsertRangeTest(Val, AddOne(LHSCst),
3934 RHSCst, false, true, I);
3935 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
3939 case ICmpInst::ICMP_SGT:
3941 default: llvm_unreachable("Unknown integer condition code!");
3942 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
3943 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
3944 return ReplaceInstUsesWith(I, RHS);
3945 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
3947 case ICmpInst::ICMP_NE:
3948 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
3949 return new ICmpInst(LHSCC, Val, RHSCst);
3950 break; // (X s> 13 & X != 15) -> no change
3951 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
3952 return InsertRangeTest(Val, AddOne(LHSCst),
3953 RHSCst, true, true, I);
3954 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
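3962 /// FoldAndOfFCmps - Fold (fcmp)&(fcmp) if possible.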
3963 Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS,
3966 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
3967 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
3968 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
3969 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
3970 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
3971 // If either of the constants is a NaN, then the whole thing returns
3972 // false.
3973 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
3974 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
3975 return new FCmpInst(FCmpInst::FCMP_ORD,
3976 LHS->getOperand(0), RHS->getOperand(0));
3979 // Handle vector zeros. This occurs because the canonical form of
3980 // "fcmp ord x,x" is "fcmp ord x, 0".
3981 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
3982 isa<ConstantAggregateZero>(RHS->getOperand(1)))
3983 return new FCmpInst(FCmpInst::FCMP_ORD,
3984 LHS->getOperand(0), RHS->getOperand(0));
3988 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
3989 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
3990 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
3993 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
3994 // Swap RHS operands to match LHS.
3995 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
3996 std::swap(Op1LHS, Op1RHS);
3999 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
4000 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
4002 return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
4004 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
4005 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4006 if (Op0CC == FCmpInst::FCMP_TRUE)
4007 return ReplaceInstUsesWith(I, RHS);
4008 if (Op1CC == FCmpInst::FCMP_TRUE)
4009 return ReplaceInstUsesWith(I, LHS);
4013 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
4014 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
4016 std::swap(LHS, RHS);
4017 std::swap(Op0Pred, Op1Pred);
4018 std::swap(Op0Ordered, Op1Ordered);
4021 // uno && ueq -> uno && (uno || eq) -> ueq
4022 // ord && olt -> ord && (ord && lt) -> olt
4023 if (Op0Ordered == Op1Ordered)
4024 return ReplaceInstUsesWith(I, RHS);
4026 // uno && oeq -> uno && (ord && eq) -> false
4027 // uno && ord -> false
4028 if (!Op0Ordered)
4029 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4030 // ord && ueq -> ord && (uno || eq) -> oeq
4031 return cast<Instruction>(getFCmpValue(true, Op1Pred,
4032 Op0LHS, Op0RHS, Context));
4040 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
4041 bool Changed = SimplifyCommutative(I);
4042 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4044 if (isa<UndefValue>(Op1)) // X & undef -> 0
4045 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
4047 // and X, X = X
4048 if (Op0 == Op1)
4049 return ReplaceInstUsesWith(I, Op1);
4051 // See if we can simplify any instructions used by the instruction whose sole
4052 // purpose is to compute bits we don't care about.
4053 if (SimplifyDemandedInstructionBits(I))
4055 if (isa<VectorType>(I.getType())) {
4056 if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
4057 if (CP->isAllOnesValue()) // X & <-1,-1> -> X
4058 return ReplaceInstUsesWith(I, I.getOperand(0));
4059 } else if (isa<ConstantAggregateZero>(Op1)) {
4060 return ReplaceInstUsesWith(I, Op1); // X & <0,0> -> <0,0>
4064 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
4065 const APInt& AndRHSMask = AndRHS->getValue();
4066 APInt NotAndRHS(~AndRHSMask);
4068 // Optimize a variety of ((val OP C1) & C2) combinations...
4069 if (isa<BinaryOperator>(Op0)) {
4070 Instruction *Op0I = cast<Instruction>(Op0);
4071 Value *Op0LHS = Op0I->getOperand(0);
4072 Value *Op0RHS = Op0I->getOperand(1);
4073 switch (Op0I->getOpcode()) {
4074 case Instruction::Xor:
4075 case Instruction::Or:
4076 // If the mask is only needed on one incoming arm, push it up.
4077 if (Op0I->hasOneUse()) {
4078 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
4079 // Not masking anything out for the LHS, move to RHS.
4080 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
4081 Op0RHS->getName()+".masked");
4082 return BinaryOperator::Create(
4083 cast<BinaryOperator>(Op0I)->getOpcode(), Op0LHS, NewRHS);
4085 if (!isa<Constant>(Op0RHS) &&
4086 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
4087 // Not masking anything out for the RHS, move to LHS.
4088 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
4089 Op0LHS->getName()+".masked");
4090 return BinaryOperator::Create(
4091 cast<BinaryOperator>(Op0I)->getOpcode(), NewLHS, Op0RHS);
4096 case Instruction::Add:
4097 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
4098 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4099 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4100 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
4101 return BinaryOperator::CreateAnd(V, AndRHS);
4102 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
4103 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
4106 case Instruction::Sub:
4107 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
4108 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4109 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4110 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
4111 return BinaryOperator::CreateAnd(V, AndRHS);
4113 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
4114 // has 1's for all bits that the subtraction with A might affect.
4115 if (Op0I->hasOneUse()) {
4116 uint32_t BitWidth = AndRHSMask.getBitWidth();
4117 uint32_t Zeros = AndRHSMask.countLeadingZeros();
4118 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
4120 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
4121 if (!(A && A->isZero()) && // avoid infinite recursion.
4122 MaskedValueIsZero(Op0LHS, Mask)) {
4123 Value *NewNeg = Builder->CreateNeg(Op0RHS);
4124 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
4129 case Instruction::Shl:
4130 case Instruction::LShr:
4131 // (1 << x) & 1 --> zext(x == 0)
4132 // (1 >> x) & 1 --> zext(x == 0)
4133 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
4134 Value *NewICmp =
4135 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
4136 return new ZExtInst(NewICmp, I.getType());
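4140 // Otherwise, see if OptAndOp can simplify this ((val OP C1) & C2) pattern.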
4141 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
4142 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
4144 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
4145 // If this is an integer truncation or change from signed-to-unsigned, and
4146 // if the source is an and/or with immediate, transform it. This
4147 // frequently occurs for bitfield accesses.
4148 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
4149 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
4150 CastOp->getNumOperands() == 2)
4151 if (ConstantInt *AndCI = dyn_cast<ConstantInt>(CastOp->getOperand(1))) {
4152 if (CastOp->getOpcode() == Instruction::And) {
4153 // Change: and (cast (and X, C1) to T), C2
4154 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
4155 // This will fold the two constants together, which may allow
4156 // other simplifications.
4157 Value *NewCast = Builder->CreateTruncOrBitCast(
4158 CastOp->getOperand(0), I.getType(),
4159 CastOp->getName()+".shrunk");
4160 // trunc_or_bitcast(C1)&C2
4161 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
4162 C3 = ConstantExpr::getAnd(C3, AndRHS);
4163 return BinaryOperator::CreateAnd(NewCast, C3);
4164 } else if (CastOp->getOpcode() == Instruction::Or) {
4165 // Change: and (cast (or X, C1) to T), C2
4166 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
4167 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
4168 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
4170 return ReplaceInstUsesWith(I, AndRHS);
4176 // Try to fold constant and into select arguments.
4177 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4178 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4180 if (isa<PHINode>(Op0))
4181 if (Instruction *NV = FoldOpIntoPhi(I))
4185 Value *Op0NotVal = dyn_castNotVal(Op0);
4186 Value *Op1NotVal = dyn_castNotVal(Op1);
4188 if (Op0NotVal == Op1 || Op1NotVal == Op0) // A & ~A == ~A & A == 0
4189 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
4191 // (~A & ~B) == (~(A | B)) - De Morgan's Law
4192 if (Op0NotVal && Op1NotVal && isOnlyUse(Op0) && isOnlyUse(Op1)) {
4193 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
4194 I.getName()+".demorgan");
4195 return BinaryOperator::CreateNot(Or);
4199 Value *A = 0, *B = 0, *C = 0, *D = 0;
4200 if (match(Op0, m_Or(m_Value(A), m_Value(B)))) {
4201 if (A == Op1 || B == Op1) // (A | ?) & A --> A
4202 return ReplaceInstUsesWith(I, Op1);
4204 // (A|B) & ~(A&B) -> A^B
4205 if (match(Op1, m_Not(m_And(m_Value(C), m_Value(D))))) {
4206 if ((A == C && B == D) || (A == D && B == C))
4207 return BinaryOperator::CreateXor(A, B);
4211 if (match(Op1, m_Or(m_Value(A), m_Value(B)))) {
4212 if (A == Op0 || B == Op0) // A & (A | ?) --> A
4213 return ReplaceInstUsesWith(I, Op0);
4215 // ~(A&B) & (A|B) -> A^B
4216 if (match(Op0, m_Not(m_And(m_Value(C), m_Value(D))))) {
4217 if ((A == C && B == D) || (A == D && B == C))
4218 return BinaryOperator::CreateXor(A, B);
4222 if (Op0->hasOneUse() &&
4223 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4224 if (A == Op1) { // (A^B)&A -> A&(A^B)
4225 I.swapOperands(); // Simplify below
4226 std::swap(Op0, Op1);
4227 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
4228 cast<BinaryOperator>(Op0)->swapOperands();
4229 I.swapOperands(); // Simplify below
4230 std::swap(Op0, Op1);
4234 if (Op1->hasOneUse() &&
4235 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
4236 if (B == Op0) { // B&(A^B) -> B&(B^A)
4237 cast<BinaryOperator>(Op1)->swapOperands();
4240 if (A == Op0) // A&(A^B) -> A & ~B
4241 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
4244 // (A&((~A)|B)) -> A&B
4245 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
4246 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
4247 return BinaryOperator::CreateAnd(A, Op1);
4248 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
4249 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
4250 return BinaryOperator::CreateAnd(A, Op0);
4253 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1)) {
4254 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
4255 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
4258 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
4259 if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
4263 // fold (and (cast A), (cast B)) -> (cast (and A, B))
4264 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
4265 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4266 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
4267 const Type *SrcTy = Op0C->getOperand(0)->getType();
4268 if (SrcTy == Op1C->getOperand(0)->getType() &&
4269 SrcTy->isIntOrIntVector() &&
4270 // Only do this if the casts both really cause code to be generated.
4271 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4273 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4275 Value *NewOp = Builder->CreateAnd(Op0C->getOperand(0),
4276 Op1C->getOperand(0), I.getName());
4277 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
4281 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
4282 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4283 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4284 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4285 SI0->getOperand(1) == SI1->getOperand(1) &&
4286 (SI0->hasOneUse() || SI1->hasOneUse())) {
4288 Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
4290 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
4291 SI1->getOperand(1));
4295 // If and'ing two fcmp, try combine them into one.
4296 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4297 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
4298 if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
4302 return Changed ? &I : 0;
4305 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
4306 /// capable of providing pieces of a bswap. The subexpression provides pieces
4307 /// of a bswap if it is proven that each of the non-zero bytes in the output of
4308 /// the expression came from the corresponding "byte swapped" byte in some other
4309 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
4310 /// we know that the expression deposits the low byte of %X into the high byte
4311 /// of the bswap result and that all other bytes are zero. This expression is
4312 /// accepted, the high byte of ByteValues is set to X to indicate a correct
4313 /// match.
4315 /// This function returns true if the match was unsuccessful and false otherwise.
4316 /// On entry to the function the "OverallLeftShift" is a signed integer value
4317 /// indicating the number of bytes that the subexpression is later shifted. For
4318 /// example, if the expression is later right shifted by 16 bits, the
4319 /// OverallLeftShift value would be -2 on entry. This is used to specify which
4320 /// byte of ByteValues is actually being set.
4322 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
4323 /// byte is masked to zero by a user. For example, in (X & 255), X will be
4324 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
4325 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
4326 /// always in the local (OverallLeftShift) coordinate space.
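4327 /// For example, the i32 idiom (X>>24) | ((X>>8)&0xFF00) | ((X<<8)&0xFF0000) | (X<<24) assembles into a bswap, with every ByteValues entry set to X.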
4328 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
4329 SmallVector<Value*, 8> &ByteValues) {
4330 if (Instruction *I = dyn_cast<Instruction>(V)) {
4331 // If this is an or instruction, it may be an inner node of the bswap.
4332 if (I->getOpcode() == Instruction::Or) {
4333 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4335 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
4339 // If this is a logical shift by a constant multiple of 8, recurse with
4340 // OverallLeftShift and ByteMask adjusted.
4341 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
4343 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
4344 // Ensure the shift amount is defined and of a byte value.
4345 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
4348 unsigned ByteShift = ShAmt >> 3;
4349 if (I->getOpcode() == Instruction::Shl) {
4350 // X << 2 -> collect(X, +2)
4351 OverallLeftShift += ByteShift;
4352 ByteMask >>= ByteShift;
4354 // X >>u 2 -> collect(X, -2)
4355 OverallLeftShift -= ByteShift;
4356 ByteMask <<= ByteShift;
4357 ByteMask &= (~0U >> (32-ByteValues.size()));
4360 if (OverallLeftShift >= (int)ByteValues.size()) return true;
4361 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
4363 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4367 // If this is a logical 'and' with a mask that clears bytes, clear the
4368 // corresponding bytes in ByteMask.
4369 if (I->getOpcode() == Instruction::And &&
4370 isa<ConstantInt>(I->getOperand(1))) {
4371 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
4372 unsigned NumBytes = ByteValues.size();
4373 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
4374 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
4376 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
4377 // If this byte is masked out by a later operation, we don't care what
4378 // value it has.
4379 if ((ByteMask & (1 << i)) == 0)
4382 // If the AndMask is all zeros for this byte, clear the bit.
4383 APInt MaskB = AndMask & Byte;
4385 ByteMask &= ~(1U << i);
4389 // If the AndMask is not all ones for this byte, it's not a bytezap.
4393 // Otherwise, this byte is kept.
4396 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4401 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
4402 // the input value to the bswap. Some observations: 1) if more than one byte
4403 // is demanded from this input, then it could not be successfully assembled
4404 // into a byteswap. At least one of the two bytes would not be aligned with
4405 // its ultimate destination.
4406 if (!isPowerOf2_32(ByteMask)) return true;
4407 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
4409 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
4410 // is demanded, it needs to go into byte 0 of the result. This means that the
4411 // byte needs to be shifted until it lands in the right byte bucket. The
4412 // shift amount depends on the position: if the byte is coming from the high
4413 // part of the value (e.g. byte 3) then it must be shifted right. If from the
4414 // low part, it must be shifted left.
4415 unsigned DestByteNo = InputByteNo + OverallLeftShift;
4416 if (InputByteNo < ByteValues.size()/2) {
4417 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4420 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4424 // If the destination byte value is already defined, the values are or'd
4425 // together, which isn't a bswap (unless it's an or of the same bits).
4426 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
4428 ByteValues[DestByteNo] = V;
4432 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
4433 /// If so, insert the new bswap intrinsic and return it.
4434 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
4435 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
4436 if (!ITy || ITy->getBitWidth() % 16 ||
4437 // ByteMask only allows up to 32-byte values.
4438 ITy->getBitWidth() > 32*8)
4439 return 0; // Can only bswap pairs of bytes. Can't do vectors.
4441 /// ByteValues - For each byte of the result, we keep track of which value
4442 /// defines each byte.
4443 SmallVector<Value*, 8> ByteValues;
4444 ByteValues.resize(ITy->getBitWidth()/8);
4446 // Try to find all the pieces corresponding to the bswap.
4447 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
4448 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
4451 // Check to see if all of the bytes come from the same value.
4452 Value *V = ByteValues[0];
4453 if (V == 0) return 0; // Didn't find a byte? Must be zero.
4455 // Check to make sure that all of the bytes come from the same value.
4456 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
4457 if (ByteValues[i] != V)
4459 const Type *Tys[] = { ITy };
4460 Module *M = I.getParent()->getParent()->getParent();
4461 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
4462 return CallInst::Create(F, V);
4465 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
4466 /// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then
4467 /// we can simplify this expression to "cond ? C : D or B".
4468 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
4470 LLVMContext *Context) {
4471 // If A is not a select of -1/0, this cannot match.
4473 if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond))))
4476 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
4477 if (match(D, m_SelectCst<0, -1>(m_Specific(Cond))))
4478 return SelectInst::Create(Cond, C, B);
4479 if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
4480 return SelectInst::Create(Cond, C, B);
4481 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
4482 if (match(B, m_SelectCst<0, -1>(m_Specific(Cond))))
4483 return SelectInst::Create(Cond, C, D);
4484 if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
4485 return SelectInst::Create(Cond, C, D);
4489 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
4490 Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
4491 ICmpInst *LHS, ICmpInst *RHS) {
4493 ConstantInt *LHSCst, *RHSCst;
4494 ICmpInst::Predicate LHSCC, RHSCC;
4496 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
4497 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
4498 m_ConstantInt(LHSCst))) ||
4499 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
4500 m_ConstantInt(RHSCst))))
4503 // From here on, we only handle:
4504 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
4505 if (Val != Val2) return 0;
4507 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
4508 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
4509 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
4510 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
4511 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
4514 // We can't fold (ugt x, C) | (sgt x, C2).
4515 if (!PredicatesFoldable(LHSCC, RHSCC))
4518 // Ensure that the larger constant is on the RHS.
4520 if (ICmpInst::isSignedPredicate(LHSCC) ||
4521 (ICmpInst::isEquality(LHSCC) &&
4522 ICmpInst::isSignedPredicate(RHSCC)))
4523 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
4525 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
4528 std::swap(LHS, RHS);
4529 std::swap(LHSCst, RHSCst);
4530 std::swap(LHSCC, RHSCC);
4533 // At this point, we know we have two icmp instructions
4534 // comparing a value against two constants and or'ing the result
4535 // together. Because of the above check, we know that we only have
4536 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
4537 // FoldICmpLogical check above), that the two constants are not
4538 // equal and that the larger constant is on the RHS.
4539 assert(LHSCst != RHSCst && "Compares not folded above?");
4542 default: llvm_unreachable("Unknown integer condition code!");
4543 case ICmpInst::ICMP_EQ:
4545 default: llvm_unreachable("Unknown integer condition code!");
4546 case ICmpInst::ICMP_EQ:
4547 if (LHSCst == SubOne(RHSCst)) {
4548 // (X == 13 | X == 14) -> X-13 <u 2
4549 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
4550 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
4551 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
4552 return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST);
4554 break; // (X == 13 | X == 15) -> no change
4555 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
4556 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
4558 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
4559 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
4560 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
4561 return ReplaceInstUsesWith(I, RHS);
4564 case ICmpInst::ICMP_NE:
4566 default: llvm_unreachable("Unknown integer condition code!");
4567 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
4568 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
4569 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
4570 return ReplaceInstUsesWith(I, LHS);
4571 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
4572 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
4573 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
4574 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4577 case ICmpInst::ICMP_ULT:
4579 default: llvm_unreachable("Unknown integer condition code!");
4580 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
4582 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
4583 // If RHSCst is [us]MAXINT, it is always false. Not handling
4584 // this can cause overflow.
4585 if (RHSCst->isMaxValue(false))
4586 return ReplaceInstUsesWith(I, LHS);
4587 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
4589 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
4591 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
4592 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
4593 return ReplaceInstUsesWith(I, RHS);
4594 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
4598 case ICmpInst::ICMP_SLT:
4600 default: llvm_unreachable("Unknown integer condition code!");
4601 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
4603 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
4604 // If RHSCst is [us]MAXINT, it is always false. Not handling
4605 // this can cause overflow.
4606 if (RHSCst->isMaxValue(true))
4607 return ReplaceInstUsesWith(I, LHS);
4608 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
4610 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
4612 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
4613 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
4614 return ReplaceInstUsesWith(I, RHS);
4615 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
4619 case ICmpInst::ICMP_UGT:
4621 default: llvm_unreachable("Unknown integer condition code!");
4622 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
4623 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
4624 return ReplaceInstUsesWith(I, LHS);
4625 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
4627 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
4628 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
4629 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4630 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
4634 case ICmpInst::ICMP_SGT:
4636 default: llvm_unreachable("Unknown integer condition code!");
4637 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
4638 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
4639 return ReplaceInstUsesWith(I, LHS);
4640 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
4642 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
4643 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
4644 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4645 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
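4652 /// FoldOrOfFCmps - Fold (fcmp)|(fcmp) if possible.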
4653 Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS,
4655 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
4656 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
4657 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
4658 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4659 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
4660 // If either of the constants is a NaN, then the whole thing returns
4661 // true.
4662 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
4663 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4665 // Otherwise, there's no need to compare the two constants; compare
4666 // the variables.
4667 return new FCmpInst(FCmpInst::FCMP_UNO,
4668 LHS->getOperand(0), RHS->getOperand(0));
4671 // Handle vector zeros. This occurs because the canonical form of
4672 // "fcmp uno x,x" is "fcmp uno x, 0".
4673 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
4674 isa<ConstantAggregateZero>(RHS->getOperand(1)))
4675 return new FCmpInst(FCmpInst::FCMP_UNO,
4676 LHS->getOperand(0), RHS->getOperand(0));
4681 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
4682 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
4683 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
4685 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
4686 // Swap RHS operands to match LHS.
4687 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
4688 std::swap(Op1LHS, Op1RHS);
4690 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
4691 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
4693 return new FCmpInst((FCmpInst::Predicate)Op0CC,
4695 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
4696 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4697 if (Op0CC == FCmpInst::FCMP_FALSE)
4698 return ReplaceInstUsesWith(I, RHS);
4699 if (Op1CC == FCmpInst::FCMP_FALSE)
4700 return ReplaceInstUsesWith(I, LHS);
4703 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
4704 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
4705 if (Op0Ordered == Op1Ordered) {
4706 // If both are ordered or unordered, return a new fcmp with
4707 // or'ed predicates.
4708 Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred,
4709 Op0LHS, Op0RHS, Context);
4710 if (Instruction *I = dyn_cast<Instruction>(RV))
4712 // Otherwise, it's a constant boolean value...
4713 return ReplaceInstUsesWith(I, RV);
4719 /// FoldOrWithConstants - This helper function folds:
4721 /// ((A | B) & C1) | (B & C2)
4723 /// into:
4725 /// (A & C1) | B
4727 /// when the XOR of the two constants is "all ones" (-1).
4728 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
4729 Value *A, Value *B, Value *C) {
4730 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
4734 ConstantInt *CI2 = 0;
4735 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
4737 APInt Xor = CI1->getValue() ^ CI2->getValue();
4738 if (!Xor.isAllOnesValue()) return 0;
4740 if (V1 == A || V1 == B) {
4741 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
4742 return BinaryOperator::CreateOr(NewOp, V1);
4748 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
4749 bool Changed = SimplifyCommutative(I);
4750 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4752 if (isa<UndefValue>(Op1)) // X | undef -> -1
4753 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4755 // or X, X = X
4756 if (Op0 == Op1)
4757 return ReplaceInstUsesWith(I, Op0);
4759 // See if we can simplify any instructions used by the instruction whose sole
4760 // purpose is to compute bits we don't care about.
4761 if (SimplifyDemandedInstructionBits(I))
4763 if (isa<VectorType>(I.getType())) {
4764 if (isa<ConstantAggregateZero>(Op1)) {
4765 return ReplaceInstUsesWith(I, Op0); // X | <0,0> -> X
4766 } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
4767 if (CP->isAllOnesValue()) // X | <-1,-1> -> <-1,-1>
4768 return ReplaceInstUsesWith(I, I.getOperand(1));
4773 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
4774 ConstantInt *C1 = 0; Value *X = 0;
4775 // (X & C1) | C2 --> (X | C2) & (C1|C2)
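         // e.g. (X & 0xF0) | 0x0F --> (X | 0x0F) & 0xFF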
4776 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
4778 Value *Or = Builder->CreateOr(X, RHS);
4780 return BinaryOperator::CreateAnd(Or,
4781 ConstantInt::get(*Context, RHS->getValue() | C1->getValue()));
4784 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
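         // e.g. (X ^ 0x0F) | 0xF0 --> (X | 0xF0) ^ 0x0F  (here C1&~C2 == 0x0F)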
4785 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
4787 Value *Or = Builder->CreateOr(X, RHS);
4789 return BinaryOperator::CreateXor(Or,
4790 ConstantInt::get(*Context, C1->getValue() & ~RHS->getValue()));
4793 // Try to fold constant and into select arguments.
4794 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4795 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4797 if (isa<PHINode>(Op0))
4798 if (Instruction *NV = FoldOpIntoPhi(I))
4802 Value *A = 0, *B = 0;
4803 ConstantInt *C1 = 0, *C2 = 0;
4805 if (match(Op0, m_And(m_Value(A), m_Value(B))))
4806 if (A == Op1 || B == Op1) // (A & ?) | A --> A
4807 return ReplaceInstUsesWith(I, Op1);
4808 if (match(Op1, m_And(m_Value(A), m_Value(B))))
4809 if (A == Op0 || B == Op0) // A | (A & ?) --> A
4810 return ReplaceInstUsesWith(I, Op0);
4812 // (A | B) | C and A | (B | C) -> bswap if possible.
4813   // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
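       // e.g. the i32 byte swap idiom:
       //   (x >> 24) | ((x >> 8) & 0xFF00) | ((x << 8) & 0xFF0000) | (x << 24)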
4814 if (match(Op0, m_Or(m_Value(), m_Value())) ||
4815 match(Op1, m_Or(m_Value(), m_Value())) ||
4816 (match(Op0, m_Shift(m_Value(), m_Value())) &&
4817 match(Op1, m_Shift(m_Value(), m_Value())))) {
4818 if (Instruction *BSwap = MatchBSwap(I))
4822 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
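       // e.g. if Y is known even (Y&1 == 0), (X^1)|Y --> (X|Y)^1.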
4823 if (Op0->hasOneUse() &&
4824 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
4825 MaskedValueIsZero(Op1, C1->getValue())) {
4826 Value *NOr = Builder->CreateOr(A, Op1);
4828 return BinaryOperator::CreateXor(NOr, C1);
4831 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
4832 if (Op1->hasOneUse() &&
4833 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
4834 MaskedValueIsZero(Op0, C1->getValue())) {
4835 Value *NOr = Builder->CreateOr(A, Op0);
4837 return BinaryOperator::CreateXor(NOr, C1);
4841 Value *C = 0, *D = 0;
4842 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
4843 match(Op1, m_And(m_Value(B), m_Value(D)))) {
4844 Value *V1 = 0, *V2 = 0, *V3 = 0;
4845 C1 = dyn_cast<ConstantInt>(C);
4846 C2 = dyn_cast<ConstantInt>(D);
4847 if (C1 && C2) { // (A & C1)|(B & C2)
4848 // If we have: ((V + N) & C1) | (V & C2)
4849 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
4850 // replace with V+N.
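           // e.g. C1 = -256, C2 = 255: ((V+N) & -256) | (V & 255) --> V+N when
           // N's low 8 bits are zero, since the add cannot change V's low byte.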
4851 if (C1->getValue() == ~C2->getValue()) {
4852 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
4853 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
4854 // Add commutes, try both ways.
4855 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
4856 return ReplaceInstUsesWith(I, A);
4857 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
4858 return ReplaceInstUsesWith(I, A);
4860 // Or commutes, try both ways.
4861 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
4862 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
4863 // Add commutes, try both ways.
4864 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
4865 return ReplaceInstUsesWith(I, B);
4866 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
4867 return ReplaceInstUsesWith(I, B);
4870 V1 = 0; V2 = 0; V3 = 0;
4873 // Check to see if we have any common things being and'ed. If so, find the
4874 // terms for V1 & (V2|V3).
4875 if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
4876 if (A == B) // (A & C)|(A & D) == A & (C|D)
4877 V1 = A, V2 = C, V3 = D;
4878 else if (A == D) // (A & C)|(B & A) == A & (B|C)
4879 V1 = A, V2 = B, V3 = C;
4880 else if (C == B) // (A & C)|(C & D) == C & (A|D)
4881 V1 = C, V2 = A, V3 = D;
4882 else if (C == D) // (A & C)|(B & C) == C & (A|B)
4883 V1 = C, V2 = A, V3 = B;
4886 Value *Or = Builder->CreateOr(V2, V3, "tmp");
4887 return BinaryOperator::CreateAnd(V1, Or);
4891 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
4892 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D, Context))
4894 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C, Context))
4896 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D, Context))
4898 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C, Context))
4901 // ((A&~B)|(~A&B)) -> A^B
4902 if ((match(C, m_Not(m_Specific(D))) &&
4903 match(B, m_Not(m_Specific(A)))))
4904 return BinaryOperator::CreateXor(A, D);
4905 // ((~B&A)|(~A&B)) -> A^B
4906 if ((match(A, m_Not(m_Specific(D))) &&
4907 match(B, m_Not(m_Specific(C)))))
4908 return BinaryOperator::CreateXor(C, D);
4909 // ((A&~B)|(B&~A)) -> A^B
4910 if ((match(C, m_Not(m_Specific(B))) &&
4911 match(D, m_Not(m_Specific(A)))))
4912 return BinaryOperator::CreateXor(A, B);
4913 // ((~B&A)|(B&~A)) -> A^B
4914 if ((match(A, m_Not(m_Specific(B))) &&
4915 match(D, m_Not(m_Specific(C)))))
4916 return BinaryOperator::CreateXor(C, B);
4919 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
4920 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4921 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4922 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4923 SI0->getOperand(1) == SI1->getOperand(1) &&
4924 (SI0->hasOneUse() || SI1->hasOneUse())) {
4925 Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
4927 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
4928 SI1->getOperand(1));
4932 // ((A|B)&1)|(B&-2) -> (A&1) | B
4933 if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
4934 match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
4935 Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
4936 if (Ret) return Ret;
4938 // (B&-2)|((A|B)&1) -> (A&1) | B
4939 if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
4940 match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
4941 Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
4942 if (Ret) return Ret;
4945 if (match(Op0, m_Not(m_Value(A)))) { // ~A | Op1
4946 if (A == Op1) // ~A | A == -1
4947 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4951 // Note, A is still live here!
4952 if (match(Op1, m_Not(m_Value(B)))) { // Op0 | ~B
4954 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4956 // (~A | ~B) == (~(A & B)) - De Morgan's Law
4957 if (A && isOnlyUse(Op0) && isOnlyUse(Op1)) {
4958 Value *And = Builder->CreateAnd(A, B, I.getName()+".demorgan");
4959 return BinaryOperator::CreateNot(And);
4963 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
4964 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
4965 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
4968 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
4969 if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
4973 // fold (or (cast A), (cast B)) -> (cast (or A, B))
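  // e.g. (zext i8 %a to i32) | (zext i8 %b to i32) --> zext (or i8 %a, %b) to i32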
4974 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
4975 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4976 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
4977 if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
4978 !isa<ICmpInst>(Op1C->getOperand(0))) {
4979 const Type *SrcTy = Op0C->getOperand(0)->getType();
4980 if (SrcTy == Op1C->getOperand(0)->getType() &&
4981 SrcTy->isIntOrIntVector() &&
4982 // Only do this if the casts both really cause code to be
4984 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4986 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4988 Value *NewOp = Builder->CreateOr(Op0C->getOperand(0),
4989 Op1C->getOperand(0), I.getName());
4990 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
4997 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
4998 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4999 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
5000 if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
5004 return Changed ? &I : 0;
5009 // XorSelf - Implements: X ^ X --> 0
5012 XorSelf(Value *rhs) : RHS(rhs) {}
5013 bool shouldApply(Value *LHS) const { return LHS == RHS; }
5014 Instruction *apply(BinaryOperator &Xor) const {
5021 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
5022 bool Changed = SimplifyCommutative(I);
5023 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5025 if (isa<UndefValue>(Op1)) {
5026 if (isa<UndefValue>(Op0))
5027 // Handle undef ^ undef -> 0 special case. This is a common
5029 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
5030 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
5033 // xor X, X = 0, even if X is nested in a sequence of Xor's.
5034 if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) {
5035     assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result; // silence unused-var warning in NDEBUG
5036 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
5039 // See if we can simplify any instructions used by the instruction whose sole
5040 // purpose is to compute bits we don't care about.
5041 if (SimplifyDemandedInstructionBits(I))
5043 if (isa<VectorType>(I.getType()))
5044 if (isa<ConstantAggregateZero>(Op1))
5045 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
5047 // Is this a ~ operation?
5048 if (Value *NotOp = dyn_castNotVal(&I)) {
5049 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
5050 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
5051 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
5052 if (Op0I->getOpcode() == Instruction::And ||
5053 Op0I->getOpcode() == Instruction::Or) {
5054 if (dyn_castNotVal(Op0I->getOperand(1))) Op0I->swapOperands();
5055 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
5057 Builder->CreateNot(Op0I->getOperand(1),
5058 Op0I->getOperand(1)->getName()+".not");
5059 if (Op0I->getOpcode() == Instruction::And)
5060 return BinaryOperator::CreateOr(Op0NotVal, NotY);
5061 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
5068 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
5069 if (RHS == ConstantInt::getTrue(*Context) && Op0->hasOneUse()) {
5070 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
5071 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
5072 return new ICmpInst(ICI->getInversePredicate(),
5073 ICI->getOperand(0), ICI->getOperand(1));
5075 if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
5076 return new FCmpInst(FCI->getInversePredicate(),
5077 FCI->getOperand(0), FCI->getOperand(1));
5080 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
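    // e.g. xor (zext i1 (icmp eq %a, %b) to i32), 1
    //        --> zext i1 (icmp ne %a, %b) to i32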
5081 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5082 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
5083 if (CI->hasOneUse() && Op0C->hasOneUse()) {
5084 Instruction::CastOps Opcode = Op0C->getOpcode();
5085 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
5086 (RHS == ConstantExpr::getCast(Opcode,
5087 ConstantInt::getTrue(*Context),
5088 Op0C->getDestTy()))) {
5089 CI->setPredicate(CI->getInversePredicate());
5090 return CastInst::Create(Opcode, CI, Op0C->getType());
5096 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
5097 // ~(c-X) == X-c-1 == X+(-c-1)
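      // e.g. ~(5-X) == X-6 == X+(-6)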
5098 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
5099 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
5100 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
5101 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
5102 ConstantInt::get(I.getType(), 1));
5103 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
5106 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
5107 if (Op0I->getOpcode() == Instruction::Add) {
5108              // ~(X+c) --> (-c-1)-X
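              // e.g. ~(X+5) == -X-6 == (-6)-X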
5109 if (RHS->isAllOnesValue()) {
5110 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
5111 return BinaryOperator::CreateSub(
5112 ConstantExpr::getSub(NegOp0CI,
5113 ConstantInt::get(I.getType(), 1)),
5114 Op0I->getOperand(0));
5115 } else if (RHS->getValue().isSignBit()) {
5116 // (X + C) ^ signbit -> (X + C + signbit)
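            // e.g. for i8, (X + 5) ^ 0x80 == X + 0x85, since xor'ing the sign
            // bit is the same as adding it (mod 2^8).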
5117 Constant *C = ConstantInt::get(*Context,
5118 RHS->getValue() + Op0CI->getValue());
5119 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
5122 } else if (Op0I->getOpcode() == Instruction::Or) {
5123           // (X|C1)^C2 -> X^(C1|C2) iff X&C1 == 0
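          // e.g. if X & 0xF0 == 0, then (X | 0xF0) ^ 0x0C --> X ^ 0xFC.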
5124 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
5125 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
5126             // Any bit set in both C1 and C2 is known to be zero in the result, so remove it from
5128 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
5129 NewRHS = ConstantExpr::getAnd(NewRHS,
5130 ConstantExpr::getNot(CommonBits));
5132 I.setOperand(0, Op0I->getOperand(0));
5133 I.setOperand(1, NewRHS);
5140 // Try to fold constant and into select arguments.
5141 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
5142 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
5144 if (isa<PHINode>(Op0))
5145 if (Instruction *NV = FoldOpIntoPhi(I))
5149 if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
5151 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
5153 if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
5155 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
5158 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
5161 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
5162 if (A == Op0) { // B^(B|A) == (A|B)^B
5163 Op1I->swapOperands();
5165 std::swap(Op0, Op1);
5166 } else if (B == Op0) { // B^(A|B) == (A|B)^B
5167 I.swapOperands(); // Simplified below.
5168 std::swap(Op0, Op1);
5170 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
5171 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
5172 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
5173 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
5174 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
5176 if (A == Op0) { // A^(A&B) -> A^(B&A)
5177 Op1I->swapOperands();
5180 if (B == Op0) { // A^(B&A) -> (B&A)^A
5181 I.swapOperands(); // Simplified below.
5182 std::swap(Op0, Op1);
5187 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
5190 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
5191 Op0I->hasOneUse()) {
5192 if (A == Op1) // (B|A)^B == (A|B)^B
5194 if (B == Op1) // (A|B)^B == A & ~B
5195 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
5196 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
5197 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
5198 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
5199 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
5200 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5202 if (A == Op1) // (A&B)^A -> (B&A)^A
5204 if (B == Op1 && // (B&A)^A == ~B & A
5205 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
5206 return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
5211 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
5212 if (Op0I && Op1I && Op0I->isShift() &&
5213 Op0I->getOpcode() == Op1I->getOpcode() &&
5214 Op0I->getOperand(1) == Op1I->getOperand(1) &&
5215       (Op0I->hasOneUse() || Op1I->hasOneUse())) {
5217 Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
5219 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
5220 Op1I->getOperand(1));
5224 Value *A, *B, *C, *D;
5225 // (A & B)^(A | B) -> A ^ B
5226 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5227 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
5228 if ((A == C && B == D) || (A == D && B == C))
5229 return BinaryOperator::CreateXor(A, B);
5231 // (A | B)^(A & B) -> A ^ B
5232 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
5233 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
5234 if ((A == C && B == D) || (A == D && B == C))
5235 return BinaryOperator::CreateXor(A, B);
5239 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
5240 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5241 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
5242     // (X & Y)^(X & Z) -> (Y^Z) & X
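     // ('&' distributes over '^': (X&Y)^(X&Z) == X&(Y^Z).)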
5243 Value *X = 0, *Y = 0, *Z = 0;
5245 X = A, Y = B, Z = D;
5247 X = A, Y = B, Z = C;
5249 X = B, Y = A, Z = D;
5251 X = B, Y = A, Z = C;
5254 Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
5255 return BinaryOperator::CreateAnd(NewOp, X);
5260 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
5261 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
5262 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
5265 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
5266 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5267 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
5268 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
5269 const Type *SrcTy = Op0C->getOperand(0)->getType();
5270 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
5271 // Only do this if the casts both really cause code to be generated.
5272 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
5274 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
5276 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
5277 Op1C->getOperand(0), I.getName());
5278 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
5283 return Changed ? &I : 0;
5286 static ConstantInt *ExtractElement(Constant *V, Constant *Idx,
5287 LLVMContext *Context) {
5288 return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
5291 static bool HasAddOverflow(ConstantInt *Result,
5292 ConstantInt *In1, ConstantInt *In2,
5295 if (In2->getValue().isNegative())
5296 return Result->getValue().sgt(In1->getValue());
5298 return Result->getValue().slt(In1->getValue());
5300 return Result->getValue().ult(In1->getValue());
5303 /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
5304 /// overflowed for this type.
5305 static bool AddWithOverflow(Constant *&Result, Constant *In1,
5306 Constant *In2, LLVMContext *Context,
5307 bool IsSigned = false) {
5308 Result = ConstantExpr::getAdd(In1, In2);
5310 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5311 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5312 Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
5313 if (HasAddOverflow(ExtractElement(Result, Idx, Context),
5314 ExtractElement(In1, Idx, Context),
5315 ExtractElement(In2, Idx, Context),
5322 return HasAddOverflow(cast<ConstantInt>(Result),
5323 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5327 static bool HasSubOverflow(ConstantInt *Result,
5328 ConstantInt *In1, ConstantInt *In2,
5331 if (In2->getValue().isNegative())
5332 return Result->getValue().slt(In1->getValue());
5334 return Result->getValue().sgt(In1->getValue());
5336 return Result->getValue().ugt(In1->getValue());
5339 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
5340 /// overflowed for this type.
5341 static bool SubWithOverflow(Constant *&Result, Constant *In1,
5342 Constant *In2, LLVMContext *Context,
5343 bool IsSigned = false) {
5344 Result = ConstantExpr::getSub(In1, In2);
5346 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5347 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5348 Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
5349 if (HasSubOverflow(ExtractElement(Result, Idx, Context),
5350 ExtractElement(In1, Idx, Context),
5351 ExtractElement(In2, Idx, Context),
5358 return HasSubOverflow(cast<ConstantInt>(Result),
5359 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5363 /// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
5364 /// code necessary to compute the offset from the base pointer (without adding
5365 /// in the base pointer). Return the result as a signed integer of intptr size.
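/// e.g. for "gep i32* %P, i64 %i" the computed offset is "%i * 4" (as an
/// intptr-sized integer).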
5366 static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
5367 TargetData &TD = *IC.getTargetData();
5368 gep_type_iterator GTI = gep_type_begin(GEP);
5369 const Type *IntPtrTy = TD.getIntPtrType(I.getContext());
5370 Value *Result = Constant::getNullValue(IntPtrTy);
5372 // Build a mask for high order bits.
5373 unsigned IntPtrWidth = TD.getPointerSizeInBits();
5374 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
5376 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
5379 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
5380 if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
5381 if (OpC->isZero()) continue;
5383 // Handle a struct index, which adds its field offset to the pointer.
5384 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
5385 Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
5387 Result = IC.Builder->CreateAdd(Result,
5388 ConstantInt::get(IntPtrTy, Size),
5389 GEP->getName()+".offs");
5393 Constant *Scale = ConstantInt::get(IntPtrTy, Size);
5395 ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
5396 Scale = ConstantExpr::getMul(OC, Scale);
5397 // Emit an add instruction.
5398 Result = IC.Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
5401 // Convert to correct type.
5402 if (Op->getType() != IntPtrTy)
5403 Op = IC.Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
5405 Constant *Scale = ConstantInt::get(IntPtrTy, Size);
5406 // We'll let instcombine(mul) convert this to a shl if possible.
5407 Op = IC.Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
5410 // Emit an add instruction.
5411 Result = IC.Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
5417 /// EvaluateGEPOffsetExpression - Return a value that can be used to compare
5418 /// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
5419 /// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
5420 /// be complex, and scales are involved. The above expression would also be
5421 /// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
5422 /// This latter form is less amenable to optimization though, and we are allowed
5423 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
5425 /// If we can't emit an optimized form for this expression, this returns null.
5427 static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
5429 TargetData &TD = *IC.getTargetData();
5430 gep_type_iterator GTI = gep_type_begin(GEP);
5432 // Check to see if this gep only has a single variable index. If so, and if
5433 // any constant indices are a multiple of its scale, then we can compute this
5434 // in terms of the scale of the variable index. For example, if the GEP
5435 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
5436 // because the expression will cross zero at the same point.
5437 unsigned i, e = GEP->getNumOperands();
5439 for (i = 1; i != e; ++i, ++GTI) {
5440 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
5441 // Compute the aggregate offset of constant indices.
5442 if (CI->isZero()) continue;
5444 // Handle a struct index, which adds its field offset to the pointer.
5445 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
5446 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
5448 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
5449 Offset += Size*CI->getSExtValue();
5452 // Found our variable index.
5457 // If there are no variable indices, we must have a constant offset, just
5458 // evaluate it the general way.
5459 if (i == e) return 0;
5461 Value *VariableIdx = GEP->getOperand(i);
5462 // Determine the scale factor of the variable element. For example, this is
5463 // 4 if the variable index is into an array of i32.
5464 uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
5466 // Verify that there are no other variable indices. If so, emit the hard way.
5467 for (++i, ++GTI; i != e; ++i, ++GTI) {
5468 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
5471 // Compute the aggregate offset of constant indices.
5472 if (CI->isZero()) continue;
5474 // Handle a struct index, which adds its field offset to the pointer.
5475 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
5476 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
5478 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
5479 Offset += Size*CI->getSExtValue();
5483 // Okay, we know we have a single variable index, which must be a
5484 // pointer/array/vector index. If there is no offset, life is simple, return
5486 unsigned IntPtrWidth = TD.getPointerSizeInBits();
5488   // Cast to IntPtrTy in case a truncation occurs. If an extension is needed,
5489 // we don't need to bother extending: the extension won't affect where the
5490 // computation crosses zero.
5491 if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
5492 VariableIdx = new TruncInst(VariableIdx,
5493 TD.getIntPtrType(VariableIdx->getContext()),
5494 VariableIdx->getName(), &I);
5498 // Otherwise, there is an index. The computation we will do will be modulo
5499 // the pointer size, so get it.
5500 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
5502 Offset &= PtrSizeMask;
5503 VariableScale &= PtrSizeMask;
5505 // To do this transformation, any constant index must be a multiple of the
5506 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
5507 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
5508 // multiple of the variable scale.
5509 int64_t NewOffs = Offset / (int64_t)VariableScale;
5510 if (Offset != NewOffs*(int64_t)VariableScale)
5513 // Okay, we can do this evaluation. Start by converting the index to intptr.
5514 const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
5515 if (VariableIdx->getType() != IntPtrTy)
5516 VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
5518 VariableIdx->getName(), &I);
5519 Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
5520 return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
5524 /// FoldGEPICmp - Fold comparisons between a GEP instruction and something
5525 /// else. At this point we know that the GEP is on the LHS of the comparison.
5526 Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
5527 ICmpInst::Predicate Cond,
5529 // Look through bitcasts.
5530 if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
5531 RHS = BCI->getOperand(0);
5533 Value *PtrBase = GEPLHS->getOperand(0);
5534 if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
5535 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
5536 // This transformation (ignoring the base and scales) is valid because we
5537 // know pointers can't overflow since the gep is inbounds. See if we can
5538 // output an optimized form.
5539 Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);
5541 // If not, synthesize the offset the hard way.
5543 Offset = EmitGEPOffset(GEPLHS, I, *this);
5544 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
5545 Constant::getNullValue(Offset->getType()));
5546 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
5547 // If the base pointers are different, but the indices are the same, just
5548 // compare the base pointer.
5549 if (PtrBase != GEPRHS->getOperand(0)) {
5550 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
5551 IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
5552 GEPRHS->getOperand(0)->getType();
5554 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5555 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5556 IndicesTheSame = false;
5560 // If all indices are the same, just compare the base pointers.
5562 return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
5563 GEPLHS->getOperand(0), GEPRHS->getOperand(0));
5565 // Otherwise, the base pointers are different and the indices are
5566 // different, bail out.
5570 // If one of the GEPs has all zero indices, recurse.
5571 bool AllZeros = true;
5572 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5573 if (!isa<Constant>(GEPLHS->getOperand(i)) ||
5574 !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
5579 return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
5580 ICmpInst::getSwappedPredicate(Cond), I);
5582 // If the other GEP has all zero indices, recurse.
5584 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5585 if (!isa<Constant>(GEPRHS->getOperand(i)) ||
5586 !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
5591 return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
5593 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
5594 // If the GEPs only differ by one index, compare it.
5595 unsigned NumDifferences = 0; // Keep track of # differences.
5596 unsigned DiffOperand = 0; // The operand that differs.
5597 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5598 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5599 if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
5600 GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
5601 // Irreconcilable differences.
5605 if (NumDifferences++) break;
5610 if (NumDifferences == 0) // SAME GEP?
5611 return ReplaceInstUsesWith(I, // No comparison is needed here.
5612 ConstantInt::get(Type::getInt1Ty(*Context),
5613 ICmpInst::isTrueWhenEqual(Cond)));
5615 else if (NumDifferences == 1) {
5616 Value *LHSV = GEPLHS->getOperand(DiffOperand);
5617 Value *RHSV = GEPRHS->getOperand(DiffOperand);
5618 // Make sure we do a signed comparison here.
5619 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
5623 // Only lower this if the icmp is the only user of the GEP or if we expect
5624 // the result to fold to a constant!
5626 (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
5627 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
5628 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
5629 Value *L = EmitGEPOffset(GEPLHS, I, *this);
5630 Value *R = EmitGEPOffset(GEPRHS, I, *this);
5631 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
5637 /// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
5639 Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
5642 if (!isa<ConstantFP>(RHSC)) return 0;
5643 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5645 // Get the width of the mantissa. We don't want to hack on conversions that
5646 // might lose information from the integer, e.g. "i64 -> float"
5647 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5648 if (MantissaWidth == -1) return 0; // Unknown.
5650   // Check to see that the input is converted from an integer type small enough
5651   // that the conversion preserves all bits. TODO: check here for "known" sign bits.
5652   // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64, for example.
5653 unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();
5655 // If this is a uitofp instruction, we need an extra bit to hold the sign.
5656 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5660 // If the conversion would lose info, don't hack on this.
5661 if ((int)InputSize > MantissaWidth)
5664 // Otherwise, we can potentially simplify the comparison. We know that it
5665 // will always come through as an integer value and we know the constant is
5666 // not a NAN (it would have been previously simplified).
5667 assert(!RHS.isNaN() && "NaN comparison not already folded!");
5669 ICmpInst::Predicate Pred;
5670 switch (I.getPredicate()) {
5671 default: llvm_unreachable("Unexpected predicate!");
5672 case FCmpInst::FCMP_UEQ:
5673 case FCmpInst::FCMP_OEQ:
5674 Pred = ICmpInst::ICMP_EQ;
5676 case FCmpInst::FCMP_UGT:
5677 case FCmpInst::FCMP_OGT:
5678 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5680 case FCmpInst::FCMP_UGE:
5681 case FCmpInst::FCMP_OGE:
5682 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5684 case FCmpInst::FCMP_ULT:
5685 case FCmpInst::FCMP_OLT:
5686 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5688 case FCmpInst::FCMP_ULE:
5689 case FCmpInst::FCMP_OLE:
5690 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5692 case FCmpInst::FCMP_UNE:
5693 case FCmpInst::FCMP_ONE:
5694 Pred = ICmpInst::ICMP_NE;
5696 case FCmpInst::FCMP_ORD:
5697 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5698 case FCmpInst::FCMP_UNO:
5699 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5702 const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5704 // Now we know that the APFloat is a normal number, zero or inf.
5706 // See if the FP constant is too large for the integer. For example,
5707 // comparing an i8 to 300.0.
5708 unsigned IntWidth = IntTy->getScalarSizeInBits();
5711 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
5712 // and large values.
5713 APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
5714 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5715 APFloat::rmNearestTiesToEven);
5716 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
5717 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
5718 Pred == ICmpInst::ICMP_SLE)
5719 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5720 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5723 // If the RHS value is > UnsignedMax, fold the comparison. This handles
5724 // +INF and large values.
5725 APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
5726 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5727 APFloat::rmNearestTiesToEven);
5728 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
5729 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
5730 Pred == ICmpInst::ICMP_ULE)
5731 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5732 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5737 // See if the RHS value is < SignedMin.
5738 APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
5739 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5740 APFloat::rmNearestTiesToEven);
5741 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
5742 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5743 Pred == ICmpInst::ICMP_SGE)
5744 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5745 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5749 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5750 // [0, UMAX], but it may still be fractional. See if it is fractional by
5751 // casting the FP value to the integer value and back, checking for equality.
5752 // Don't do this for zero, because -0.0 is not fractional.
5753 Constant *RHSInt = LHSUnsigned
5754 ? ConstantExpr::getFPToUI(RHSC, IntTy)
5755 : ConstantExpr::getFPToSI(RHSC, IntTy);
5756 if (!RHS.isZero()) {
5757 bool Equal = LHSUnsigned
5758 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
5759 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
5761 // If we had a comparison against a fractional value, we have to adjust
5762 // the compare predicate and sometimes the value. RHSC is rounded towards
5763 // zero at this point.
5765 default: llvm_unreachable("Unexpected integer comparison!");
5766 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
5767 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5768 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
5769 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5770 case ICmpInst::ICMP_ULE:
5771 // (float)int <= 4.4 --> int <= 4
5772 // (float)int <= -4.4 --> false
5773 if (RHS.isNegative())
5774 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5776 case ICmpInst::ICMP_SLE:
5777 // (float)int <= 4.4 --> int <= 4
5778 // (float)int <= -4.4 --> int < -4
5779 if (RHS.isNegative())
5780 Pred = ICmpInst::ICMP_SLT;
5782 case ICmpInst::ICMP_ULT:
5783 // (float)int < -4.4 --> false
5784 // (float)int < 4.4 --> int <= 4
5785 if (RHS.isNegative())
5786 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5787 Pred = ICmpInst::ICMP_ULE;
5789 case ICmpInst::ICMP_SLT:
5790 // (float)int < -4.4 --> int < -4
5791 // (float)int < 4.4 --> int <= 4
5792 if (!RHS.isNegative())
5793 Pred = ICmpInst::ICMP_SLE;
5795 case ICmpInst::ICMP_UGT:
5796 // (float)int > 4.4 --> int > 4
5797 // (float)int > -4.4 --> true
5798 if (RHS.isNegative())
5799 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5801 case ICmpInst::ICMP_SGT:
5802 // (float)int > 4.4 --> int > 4
5803 // (float)int > -4.4 --> int >= -4
5804 if (RHS.isNegative())
5805 Pred = ICmpInst::ICMP_SGE;
5807 case ICmpInst::ICMP_UGE:
5808 // (float)int >= -4.4 --> true
5809 // (float)int >= 4.4 --> int > 4
5810 if (!RHS.isNegative())
5811 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5812 Pred = ICmpInst::ICMP_UGT;
5814 case ICmpInst::ICMP_SGE:
5815 // (float)int >= -4.4 --> int >= -4
5816 // (float)int >= 4.4 --> int > 4
5817 if (!RHS.isNegative())
5818 Pred = ICmpInst::ICMP_SGT;
5824 // Lower this FP comparison into an appropriate integer version of the
5826 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
5829 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
5830 bool Changed = SimplifyCompare(I);
5831 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5833 // Fold trivial predicates.
5834 if (I.getPredicate() == FCmpInst::FCMP_FALSE)
5835 return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 0));
5836 if (I.getPredicate() == FCmpInst::FCMP_TRUE)
5837 return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 1));
5839 // Simplify 'fcmp pred X, X'
5841 switch (I.getPredicate()) {
5842 default: llvm_unreachable("Unknown predicate!");
5843 case FCmpInst::FCMP_UEQ: // True if unordered or equal
5844 case FCmpInst::FCMP_UGE: // True if unordered, greater than, or equal
5845 case FCmpInst::FCMP_ULE: // True if unordered, less than, or equal
5846 return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 1));
5847 case FCmpInst::FCMP_OGT: // True if ordered and greater than
5848 case FCmpInst::FCMP_OLT: // True if ordered and less than
5849 case FCmpInst::FCMP_ONE: // True if ordered and operands are unequal
5850 return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), 0));
5852 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
5853 case FCmpInst::FCMP_ULT: // True if unordered or less than
5854 case FCmpInst::FCMP_UGT: // True if unordered or greater than
5855 case FCmpInst::FCMP_UNE: // True if unordered or not equal
5856 // Canonicalize these to be 'fcmp uno %X, 0.0'.
5857 I.setPredicate(FCmpInst::FCMP_UNO);
5858 I.setOperand(1, Constant::getNullValue(Op0->getType()));
5861 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
5862 case FCmpInst::FCMP_OEQ: // True if ordered and equal
5863 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
5864 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
5865 // Canonicalize these to be 'fcmp ord %X, 0.0'.
5866 I.setPredicate(FCmpInst::FCMP_ORD);
5867 I.setOperand(1, Constant::getNullValue(Op0->getType()));
5872 if (isa<UndefValue>(Op1)) // fcmp pred X, undef -> undef
5873 return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
5875 // Handle fcmp with constant RHS
5876 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
5877 // If the constant is a nan, see if we can fold the comparison based on it.
5878 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
5879 if (CFP->getValueAPF().isNaN()) {
5880 if (FCmpInst::isOrdered(I.getPredicate())) // True if ordered and...
5881 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5882 assert(FCmpInst::isUnordered(I.getPredicate()) &&
5883 "Comparison must be either ordered or unordered!");
5884 // True if unordered.
5885 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5889 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
5890 switch (LHSI->getOpcode()) {
5891 case Instruction::PHI:
5892 // Only fold fcmp into the PHI if the phi and fcmp are in the same
5893 // block. If in the same block, we're encouraging jump threading. If
5894 // not, we are just pessimizing the code by making an i1 phi.
5895 if (LHSI->getParent() == I.getParent())
5896 if (Instruction *NV = FoldOpIntoPhi(I, true))
5899 case Instruction::SIToFP:
5900 case Instruction::UIToFP:
5901 if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
5904 case Instruction::Select:
5905 // If either operand of the select is a constant, we can fold the
5906 // comparison into the select arms, which will cause one to be
5907 // constant folded and the select turned into a bitwise or.
5908 Value *Op1 = 0, *Op2 = 0;
5909 if (LHSI->hasOneUse()) {
5910 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
5911 // Fold the known value into the constant operand.
5912 Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
5913 // Insert a new FCmp of the other select operand.
5914 Op2 = Builder->CreateFCmp(I.getPredicate(),
5915 LHSI->getOperand(2), RHSC, I.getName());
5916 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
5917 // Fold the known value into the constant operand.
5918 Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
5919 // Insert a new FCmp of the other select operand.
5920 Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
5926 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
5931 return Changed ? &I : 0;
5934 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
5935 bool Changed = SimplifyCompare(I);
5936 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5937 const Type *Ty = Op0->getType();
5941 return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(),
5942 I.isTrueWhenEqual()));
5944 if (isa<UndefValue>(Op1)) // X icmp undef -> undef
5945 return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
5947 // icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
5948 // addresses never equal each other! We already know that Op0 != Op1.
5949 if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
5950 isa<ConstantPointerNull>(Op0)) &&
5951 (isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
5952 isa<ConstantPointerNull>(Op1)))
5953 return ReplaceInstUsesWith(I, ConstantInt::get(Type::getInt1Ty(*Context),
5954 !I.isTrueWhenEqual()));
5956 // icmp's with boolean values can always be turned into bitwise operations
5957 if (Ty == Type::getInt1Ty(*Context)) {
5958 switch (I.getPredicate()) {
5959 default: llvm_unreachable("Invalid icmp instruction!");
5960 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
5961 Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
5962 return BinaryOperator::CreateNot(Xor);
5964     case ICmpInst::ICMP_NE: // icmp ne i1 A, B -> A^B
5965 return BinaryOperator::CreateXor(Op0, Op1);
5967 case ICmpInst::ICMP_UGT:
5968 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
5970 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
5971 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
5972 return BinaryOperator::CreateAnd(Not, Op1);
5974 case ICmpInst::ICMP_SGT:
5975 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
5977 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
5978 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
5979 return BinaryOperator::CreateAnd(Not, Op0);
5981 case ICmpInst::ICMP_UGE:
5982 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
5984 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
5985 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
5986 return BinaryOperator::CreateOr(Not, Op1);
5988 case ICmpInst::ICMP_SGE:
5989 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
5991 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
5992 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
5993 return BinaryOperator::CreateOr(Not, Op0);
5998 unsigned BitWidth = 0;
6000 BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
6001 else if (Ty->isIntOrIntVector())
6002 BitWidth = Ty->getScalarSizeInBits();
6004 bool isSignBit = false;
6006 // See if we are doing a comparison with a constant.
6007 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6008 Value *A = 0, *B = 0;
6010 // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
6011 if (I.isEquality() && CI->isNullValue() &&
6012 match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
6013 // (icmp cond A B) if cond is equality
6014 return new ICmpInst(I.getPredicate(), A, B);
6017 // If we have an icmp le or icmp ge instruction, turn it into the
6018 // appropriate icmp lt or icmp gt instruction. This allows us to rely on
6019 // them being folded in the code below.
6020 switch (I.getPredicate()) {
6022 case ICmpInst::ICMP_ULE:
6023 if (CI->isMaxValue(false)) // A <=u MAX -> TRUE
6024 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6025 return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
6027 case ICmpInst::ICMP_SLE:
6028 if (CI->isMaxValue(true)) // A <=s MAX -> TRUE
6029 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6030 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
6032 case ICmpInst::ICMP_UGE:
6033 if (CI->isMinValue(false)) // A >=u MIN -> TRUE
6034 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6035 return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
6037 case ICmpInst::ICMP_SGE:
6038 if (CI->isMinValue(true)) // A >=s MIN -> TRUE
6039 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6040 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
6044     // If this comparison is a normal comparison, it demands all bits;
6045     // if it is a sign-bit comparison, it only demands the sign bit.
6047 isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
6050 // See if we can fold the comparison based on range information we can get
6051 // by checking whether bits are known to be zero or one in the input.
6052 if (BitWidth != 0) {
6053 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
6054 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
6056 if (SimplifyDemandedBits(I.getOperandUse(0),
6057 isSignBit ? APInt::getSignBit(BitWidth)
6058 : APInt::getAllOnesValue(BitWidth),
6059 Op0KnownZero, Op0KnownOne, 0))
6061 if (SimplifyDemandedBits(I.getOperandUse(1),
6062 APInt::getAllOnesValue(BitWidth),
6063 Op1KnownZero, Op1KnownOne, 0))
6066 // Given the known and unknown bits, compute a range that the LHS could be
6067 // in. Compute the Min, Max and RHS values based on the known bits. For the
6068 // EQ and NE we use unsigned values.
6069 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6070 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6071 if (ICmpInst::isSignedPredicate(I.getPredicate())) {
6072 ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6074 ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6077 ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6079 ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6083 // If Min and Max are known to be the same, then SimplifyDemandedBits
6084 // figured out that the LHS is a constant. Just constant fold this now so
6085 // that code below can assume that Min != Max.
6086 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
6087 return new ICmpInst(I.getPredicate(),
6088 ConstantInt::get(*Context, Op0Min), Op1);
6089 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
6090 return new ICmpInst(I.getPredicate(), Op0,
6091 ConstantInt::get(*Context, Op1Min));
6093 // Based on the range information we know about the LHS, see if we can
6094 // simplify this comparison. For example, (x&4) < 8 is always true.
6095 switch (I.getPredicate()) {
6096 default: llvm_unreachable("Unknown icmp opcode!");
6097 case ICmpInst::ICMP_EQ:
6098 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6099 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6101 case ICmpInst::ICMP_NE:
6102 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6103 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6105 case ICmpInst::ICMP_ULT:
6106 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
6107 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6108 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
6109 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6110 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6111 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6112 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6113 if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
6114 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6117 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
6118 if (CI->isMinValue(true))
6119 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
6120 Constant::getAllOnesValue(Op0->getType()));
6123 case ICmpInst::ICMP_UGT:
6124 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
6125 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6126       if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
6127 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6129 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6130 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6131 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6132         if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(A)-1 == C
6133 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6136 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
6137 if (CI->isMaxValue(true))
6138 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
6139 Constant::getNullValue(Op0->getType()));
6142 case ICmpInst::ICMP_SLT:
6143       if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
6144 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6145       if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
6146 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6147 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
6148 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6149 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6150 if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
6151 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6155 case ICmpInst::ICMP_SGT:
6156 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
6157 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6158 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
6159 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6161 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
6162 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6163 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6164 if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
6165 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6169 case ICmpInst::ICMP_SGE:
6170 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
6171 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
6172 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6173 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
6174 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6176 case ICmpInst::ICMP_SLE:
6177 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
6178 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
6179 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6180 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
6181 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6183 case ICmpInst::ICMP_UGE:
6184 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
6185 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
6186 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6187 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
6188 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6190 case ICmpInst::ICMP_ULE:
6191 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
6192 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
6193 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6194 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
6195 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6199 // Turn a signed comparison into an unsigned one if both operands
6200 // are known to have the same sign.
6201 if (I.isSignedPredicate() &&
6202 ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
6203 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
6204 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
6207 // Test if the ICmpInst instruction is used exclusively by a select as
6208 // part of a minimum or maximum operation. If so, refrain from doing
6209 // any other folding. This helps out other analyses which understand
6210 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6211 // and CodeGen. And in this case, at least one of the comparison
6212 // operands has at least one user besides the compare (the select),
6213 // which would often largely negate the benefit of folding anyway.
6215 if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
6216 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
6217 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
6220 // See if we are doing a comparison between a constant and an instruction that
6221 // can be folded into the comparison.
6222 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6223 // Since the RHS is a ConstantInt (CI), if the left hand side is an
6224 // instruction, see if that instruction also has constants so that the
6225 // instruction can be folded into the icmp
6226 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6227 if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
6231 // Handle icmp with constant (but not simple integer constant) RHS
6232 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
6233 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6234 switch (LHSI->getOpcode()) {
6235 case Instruction::GetElementPtr:
6236 if (RHSC->isNullValue()) {
6237 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
6238 bool isAllZeros = true;
6239 for (unsigned i = 1, e = LHSI->getNumOperands(); i != e; ++i)
6240 if (!isa<Constant>(LHSI->getOperand(i)) ||
6241 !cast<Constant>(LHSI->getOperand(i))->isNullValue()) {
6246 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
6247 Constant::getNullValue(LHSI->getOperand(0)->getType()));
6251 case Instruction::PHI:
6252 // Only fold icmp into the PHI if the phi and icmp are in the same
6253 // block. If in the same block, we're encouraging jump threading. If
6254 // not, we are just pessimizing the code by making an i1 phi.
6255 if (LHSI->getParent() == I.getParent())
6256 if (Instruction *NV = FoldOpIntoPhi(I, true))
6259 case Instruction::Select: {
6260 // If either operand of the select is a constant, we can fold the
6261 // comparison into the select arms, which will cause one to be
6262 // constant folded and the select turned into a bitwise or.
6263 Value *Op1 = 0, *Op2 = 0;
6264 if (LHSI->hasOneUse()) {
6265 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
6266 // Fold the known value into the constant operand.
6267 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
6268 // Insert a new ICmp of the other select operand.
6269 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
6271 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
6272 // Fold the known value into the constant operand.
6273 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
6274 // Insert a new ICmp of the other select operand.
6275 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
6281 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
6284 case Instruction::Malloc:
6285 // If we have (malloc != null), and if the malloc has a single use, we
6286 // can assume it is successful and remove the malloc.
6287 if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) {
6289 return ReplaceInstUsesWith(I,
6290 ConstantInt::get(Type::getInt1Ty(*Context),
6291 !I.isTrueWhenEqual()));
6294 case Instruction::Call:
6295 // If we have (malloc != null), and if the malloc has a single use, we
6296 // can assume it is successful and remove the malloc.
6297 if (isMalloc(LHSI) && LHSI->hasOneUse() &&
6298 isa<ConstantPointerNull>(RHSC)) {
6300 return ReplaceInstUsesWith(I,
6301 ConstantInt::get(Type::getInt1Ty(*Context),
6302 !I.isTrueWhenEqual()));
6308 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
6309 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
6310 if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
6312 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
6313 if (Instruction *NI = FoldGEPICmp(GEP, Op0,
6314 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
6317 // Test to see if the operands of the icmp are casted versions of other
6318 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
6320 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
6321 if (isa<PointerType>(Op0->getType()) &&
6322 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
6323 // We keep moving the cast from the left operand over to the right
6324 // operand, where it can often be eliminated completely.
6325 Op0 = CI->getOperand(0);
6327 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
6328 // so eliminate it as well.
6329 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
6330 Op1 = CI2->getOperand(0);
6332 // If Op1 is a constant, we can fold the cast into the constant.
6333 if (Op0->getType() != Op1->getType()) {
6334 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
6335 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
6337 // Otherwise, cast the RHS right before the icmp
6338 Op1 = Builder->CreateBitCast(Op1, Op0->getType());
6341 return new ICmpInst(I.getPredicate(), Op0, Op1);
6345 if (isa<CastInst>(Op0)) {
6346 // Handle the special case of: icmp (cast bool to X), <cst>
6347 // This comes up when you have code like "int X = A < B; if (X) ...".
6350 // For generality, we handle any zero-extension of any operand comparison
6351 // with a constant or another cast from the same type.
6352 if (isa<ConstantInt>(Op1) || isa<CastInst>(Op1))
6353 if (Instruction *R = visitICmpInstWithCastAndCast(I))
6357 // See if it's the same type of instruction on the left and right.
6358 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
6359 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
6360 if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
6361 Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
6362 switch (Op0I->getOpcode()) {
6364 case Instruction::Add:
6365 case Instruction::Sub:
6366 case Instruction::Xor:
6367 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
6368 return new ICmpInst(I.getPredicate(), Op0I->getOperand(0),
6369 Op1I->getOperand(0));
6370 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
6371 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6372 if (CI->getValue().isSignBit()) {
6373 ICmpInst::Predicate Pred = I.isSignedPredicate()
6374 ? I.getUnsignedPredicate()
6375 : I.getSignedPredicate();
6376 return new ICmpInst(Pred, Op0I->getOperand(0),
6377 Op1I->getOperand(0));
6380 if (CI->getValue().isMaxSignedValue()) {
6381 ICmpInst::Predicate Pred = I.isSignedPredicate()
6382 ? I.getUnsignedPredicate()
6383 : I.getSignedPredicate();
6384 Pred = I.getSwappedPredicate(Pred);
6385 return new ICmpInst(Pred, Op0I->getOperand(0),
6386 Op1I->getOperand(0));
6390 case Instruction::Mul:
6391 if (!I.isEquality())
6394 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6395 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
6396 // Mask = -1 >> count-trailing-zeros(Cst).
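// Illustrative instance (added, not from the original source): in i8,
// a*12 == b*12 holds exactly when (a & 0x3F) == (b & 0x3F), because 12 has
// two trailing zero bits and its odd factor 3 is invertible modulo 2^8.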
6397 if (!CI->isZero() && !CI->isOne()) {
6398 const APInt &AP = CI->getValue();
6399 ConstantInt *Mask = ConstantInt::get(*Context,
6400 APInt::getLowBitsSet(AP.getBitWidth(),
6402 AP.getBitWidth() - AP.countTrailingZeros()));
6403 Value *And1 = Builder->CreateAnd(Op0I->getOperand(0), Mask);
6404 Value *And2 = Builder->CreateAnd(Op1I->getOperand(0), Mask);
6405 return new ICmpInst(I.getPredicate(), And1, And2);
6414 // ~x < ~y --> y < x
6416 if (match(Op0, m_Not(m_Value(A))) &&
6417 match(Op1, m_Not(m_Value(B))))
6418 return new ICmpInst(I.getPredicate(), B, A);
6421 if (I.isEquality()) {
6422 Value *A, *B, *C, *D;
6424 // -x == -y --> x == y
6425 if (match(Op0, m_Neg(m_Value(A))) &&
6426 match(Op1, m_Neg(m_Value(B))))
6427 return new ICmpInst(I.getPredicate(), A, B);
6429 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
6430 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
6431 Value *OtherVal = A == Op1 ? B : A;
6432 return new ICmpInst(I.getPredicate(), OtherVal,
6433 Constant::getNullValue(A->getType()));
6436 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
6437 // A^c1 == C^c2 --> A == C^(c1^c2)
6438 ConstantInt *C1, *C2;
6439 if (match(B, m_ConstantInt(C1)) &&
6440 match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) {
6442 ConstantInt::get(*Context, C1->getValue() ^ C2->getValue());
6443 Value *Xor = Builder->CreateXor(C, NC, "tmp");
6444 return new ICmpInst(I.getPredicate(), A, Xor);
6447 // A^B == A^D -> B == D
6448 if (A == C) return new ICmpInst(I.getPredicate(), B, D);
6449 if (A == D) return new ICmpInst(I.getPredicate(), B, C);
6450 if (B == C) return new ICmpInst(I.getPredicate(), A, D);
6451 if (B == D) return new ICmpInst(I.getPredicate(), A, C);
6455 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
6456 (A == Op0 || B == Op0)) {
6457 // A == (A^B) -> B == 0
6458 Value *OtherVal = A == Op0 ? B : A;
6459 return new ICmpInst(I.getPredicate(), OtherVal,
6460 Constant::getNullValue(A->getType()));
6463 // (A-B) == A -> B == 0
6464 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B))))
6465 return new ICmpInst(I.getPredicate(), B,
6466 Constant::getNullValue(B->getType()));
6468 // A == (A-B) -> B == 0
6469 if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B))))
6470 return new ICmpInst(I.getPredicate(), B,
6471 Constant::getNullValue(B->getType()));
6473 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6474 if (Op0->hasOneUse() && Op1->hasOneUse() &&
6475 match(Op0, m_And(m_Value(A), m_Value(B))) &&
6476 match(Op1, m_And(m_Value(C), m_Value(D)))) {
6477 Value *X = 0, *Y = 0, *Z = 0;
6480 X = B; Y = D; Z = A;
6481 } else if (A == D) {
6482 X = B; Y = C; Z = A;
6483 } else if (B == C) {
6484 X = A; Y = D; Z = B;
6485 } else if (B == D) {
6486 X = A; Y = C; Z = B;
6489 if (X) { // Build (X^Y) & Z
6490 Op1 = Builder->CreateXor(X, Y, "tmp");
6491 Op1 = Builder->CreateAnd(Op1, Z, "tmp");
6492 I.setOperand(0, Op1);
6493 I.setOperand(1, Constant::getNullValue(Op1->getType()));
6498 return Changed ? &I : 0;
6502 /// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
6503 /// and CmpRHS are both known to be integer constants.
6504 Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
6505 ConstantInt *DivRHS) {
6506 ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
6507 const APInt &CmpRHSV = CmpRHS->getValue();
6509 // FIXME: If the operand types don't match the type of the divide
6510 // then don't attempt this transform. The code below doesn't have the
6511 // logic to deal with a signed divide and an unsigned compare (and
6512 // vice versa). This is because (x /s C1) <s C2 produces different
6513 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
6514 // (x /u C1) <u C2. Simply casting the operands and result won't
6515 // work. :( The if statement below tests that condition and bails out if it applies.
6517 bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
6518 if (!ICI.isEquality() && DivIsSigned != ICI.isSignedPredicate())
6520 if (DivRHS->isZero())
6521 return 0; // The ProdOV computation fails on divide by zero.
6522 if (DivIsSigned && DivRHS->isAllOnesValue())
6523 return 0; // The overflow computation also screws up here
6524 if (DivRHS->isOne())
6525 return 0; // Not worth bothering, and eliminates some funny cases
6528 // Compute Prod = CI * DivRHS. We are essentially solving an equation
6529 // of the form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
6530 // C2 (CI). By solving for X we can turn this into a range check
6531 // instead of computing a divide.
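// Worked example (illustrative only): for "X /u 5 == 3", Prod = 3*5 = 15,
// so instead of dividing we check that X lies in the half-open range
// [15, 20), i.e. X u>= 15 and X u< 20.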
6532 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
6534 // Determine if the product overflows by seeing if the product is
6535 // not equal to the divide. Make sure we do the same kind of divide
6536 // as in the LHS instruction that we're folding.
6537 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
6538 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
6540 // Get the ICmp opcode
6541 ICmpInst::Predicate Pred = ICI.getPredicate();
6543 // Figure out the interval that is being checked. For example, a comparison
6544 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
6545 // Compute this interval based on the constants involved and the signedness of
6546 // the compare/divide. This computes a half-open interval, keeping track of
6547 // whether either value in the interval overflows. After analysis each
6548 // overflow variable is set to 0 if its corresponding bound variable is valid,
6549 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
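// For instance (illustrative): with i8 operands, "X /u 64 == 3" would need
// the range [192, 256); 256 does not fit in i8, so HiOverflow becomes +1
// and only the lower bound "X u>= 192" has to be tested.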
6550 int LoOverflow = 0, HiOverflow = 0;
6551 Constant *LoBound = 0, *HiBound = 0;
6553 if (!DivIsSigned) { // udiv
6554 // e.g. X/5 op 3 --> [15, 20)
6556 HiOverflow = LoOverflow = ProdOV;
6558 HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, Context, false);
6559 } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
6560 if (CmpRHSV == 0) { // (X / pos) op 0
6561 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
6562 LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS)));
6564 } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
6565 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
6566 HiOverflow = LoOverflow = ProdOV;
6568 HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, Context, true);
6569 } else { // (X / pos) op neg
6570 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
6571 HiBound = AddOne(Prod);
6572 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
6574 ConstantInt* DivNeg =
6575 cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
6576 LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, Context,
6580 } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
6581 if (CmpRHSV == 0) { // (X / neg) op 0
6582 // e.g. X/-5 op 0 --> [-4, 5)
6583 LoBound = AddOne(DivRHS);
6584 HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
6585 if (HiBound == DivRHS) { // -INTMIN = INTMIN
6586 HiOverflow = 1; // [INTMIN+1, overflow)
6587 HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
6589 } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
6590 // e.g. X/-5 op 3 --> [-19, -14)
6591 HiBound = AddOne(Prod);
6592 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
6594 LoOverflow = AddWithOverflow(LoBound, HiBound,
6595 DivRHS, Context, true) ? -1 : 0;
6596 } else { // (X / neg) op neg
6597 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
6598 LoOverflow = HiOverflow = ProdOV;
6600 HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, Context, true);
6603 // Dividing by a negative swaps the condition. LT <-> GT
6604 Pred = ICmpInst::getSwappedPredicate(Pred);
6607 Value *X = DivI->getOperand(0);
6609 default: llvm_unreachable("Unhandled icmp opcode!");
6610 case ICmpInst::ICMP_EQ:
6611 if (LoOverflow && HiOverflow)
6612 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6613 else if (HiOverflow)
6614 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
6615 ICmpInst::ICMP_UGE, X, LoBound);
6616 else if (LoOverflow)
6617 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
6618 ICmpInst::ICMP_ULT, X, HiBound);
6620 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI);
6621 case ICmpInst::ICMP_NE:
6622 if (LoOverflow && HiOverflow)
6623 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6624 else if (HiOverflow)
6625 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
6626 ICmpInst::ICMP_ULT, X, LoBound);
6627 else if (LoOverflow)
6628 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
6629 ICmpInst::ICMP_UGE, X, HiBound);
6631 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI);
6632 case ICmpInst::ICMP_ULT:
6633 case ICmpInst::ICMP_SLT:
6634 if (LoOverflow == +1) // Low bound is greater than input range.
6635 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6636 if (LoOverflow == -1) // Low bound is less than input range.
6637 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6638 return new ICmpInst(Pred, X, LoBound);
6639 case ICmpInst::ICMP_UGT:
6640 case ICmpInst::ICMP_SGT:
6641 if (HiOverflow == +1) // High bound greater than input range.
6642 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6643 else if (HiOverflow == -1) // High bound less than input range.
6644 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6645 if (Pred == ICmpInst::ICMP_UGT)
6646 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
6648 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
6653 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
6655 Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
6658 const APInt &RHSV = RHS->getValue();
6660 switch (LHSI->getOpcode()) {
6661 case Instruction::Trunc:
6662 if (ICI.isEquality() && LHSI->hasOneUse()) {
6663 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
6664 // of the high bits truncated out of x are known.
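// Sketch (illustrative, not from the original source): if x is an i32 whose
// top 24 bits are known to be 0x123456, then
//   icmp eq (trunc i32 x to i8), 42
// can become icmp eq i32 x, 0x1234562A, pulling the known-one bits into the
// widened constant.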
6665 unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
6666 SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
6667 APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
6668 APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
6669 ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
6671 // If all the high bits are known, we can do this xform.
6672 if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
6673 // Pull in the high bits from known-ones set.
6674 APInt NewRHS(RHS->getValue());
6675 NewRHS.zext(SrcBits);
6677 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
6678 ConstantInt::get(*Context, NewRHS));
6683 case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
6684 if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
6685 // If this is a comparison that tests the signbit (X < 0) or (x > -1), fold the xor into the compare.
6687 if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
6688 (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
6689 Value *CompareVal = LHSI->getOperand(0);
6691 // If the sign bit of the XorCST is not set, there is no change to
6692 // the operation, just stop using the Xor.
6693 if (!XorCST->getValue().isNegative()) {
6694 ICI.setOperand(0, CompareVal);
6699 // Was the old condition true if the operand is positive?
6700 bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
6702 // If so, the new one isn't.
6703 isTrueIfPositive ^= true;
6705 if (isTrueIfPositive)
6706 return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
6709 return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
6713 if (LHSI->hasOneUse()) {
6714 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
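// e.g. (illustrative, i8): "icmp ult (xor A, 128), 200" becomes
// "icmp slt A, 72", since xoring with the sign bit maps unsigned order onto
// signed order and 200 ^ 128 == 72.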
6715 if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
6716 const APInt &SignBit = XorCST->getValue();
6717 ICmpInst::Predicate Pred = ICI.isSignedPredicate()
6718 ? ICI.getUnsignedPredicate()
6719 : ICI.getSignedPredicate();
6720 return new ICmpInst(Pred, LHSI->getOperand(0),
6721 ConstantInt::get(*Context, RHSV ^ SignBit));
6724 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
6725 if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
6726 const APInt &NotSignBit = XorCST->getValue();
6727 ICmpInst::Predicate Pred = ICI.isSignedPredicate()
6728 ? ICI.getUnsignedPredicate()
6729 : ICI.getSignedPredicate();
6730 Pred = ICI.getSwappedPredicate(Pred);
6731 return new ICmpInst(Pred, LHSI->getOperand(0),
6732 ConstantInt::get(*Context, RHSV ^ NotSignBit));
6737 case Instruction::And: // (icmp pred (and X, AndCST), RHS)
6738 if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
6739 LHSI->getOperand(0)->hasOneUse()) {
6740 ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
6742 // If the LHS is an AND of a truncating cast, we can widen the
6743 // and/compare to be the input width without changing the value
6744 // produced, eliminating a cast.
6745 if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
6746 // We can do this transformation if either the AND constant does not
6747 // have its sign bit set or if it is an equality comparison.
6748 // Extending a relational comparison when we're checking the sign
6749 // bit would not work.
6750 if (Cast->hasOneUse() &&
6751 (ICI.isEquality() ||
6752 (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
6754 cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
6755 APInt NewCST = AndCST->getValue();
6756 NewCST.zext(BitWidth);
6758 NewCI.zext(BitWidth);
6760 Builder->CreateAnd(Cast->getOperand(0),
6761 ConstantInt::get(*Context, NewCST), LHSI->getName());
6762 return new ICmpInst(ICI.getPredicate(), NewAnd,
6763 ConstantInt::get(*Context, NewCI));
6767 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
6768 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
6769 // happens a LOT in code produced by the C front-end, for bitfield access.
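// Illustrative example (added): a bitfield test such as
//   ((flags >> 3) & 1) != 0
// is rewritten to (flags & 8) != 0, i.e. the shift is folded into the mask
// and the compared constant.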
6771 BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
6772 if (Shift && !Shift->isShift())
6776 ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
6777 const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
6778 const Type *AndTy = AndCST->getType(); // Type of the and.
6780 // We can fold this as long as we can't shift unknown bits
6781 // into the mask. This can only happen with signed shift
6782 // rights, as they sign-extend.
6784 bool CanFold = Shift->isLogicalShift();
6786 // To test for the bad case of the signed shr, see if any
6787 // of the bits shifted in could be tested after the mask.
6788 uint32_t TyBits = Ty->getPrimitiveSizeInBits();
6789 int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
6791 uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
6792 if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
6793 AndCST->getValue()) == 0)
6799 if (Shift->getOpcode() == Instruction::Shl)
6800 NewCst = ConstantExpr::getLShr(RHS, ShAmt);
6802 NewCst = ConstantExpr::getShl(RHS, ShAmt);
6804 // Check to see if we are shifting out any of the bits being compared.
6806 if (ConstantExpr::get(Shift->getOpcode(),
6807 NewCst, ShAmt) != RHS) {
6808 // If we shifted bits out, the fold is not going to work out.
6809 // As a special case, check to see if this means that the
6810 // result is always true or false now.
6811 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
6812 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6813 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
6814 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6816 ICI.setOperand(1, NewCst);
6817 Constant *NewAndCST;
6818 if (Shift->getOpcode() == Instruction::Shl)
6819 NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
6821 NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
6822 LHSI->setOperand(1, NewAndCST);
6823 LHSI->setOperand(0, Shift->getOperand(0));
6824 Worklist.Add(Shift); // Shift is dead.
6830 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The latter is
6831 // preferable because it allows the C<<Y expression to be hoisted out
6832 // of a loop if Y is invariant and X is not.
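// Motivation sketch (illustrative): when Y is loop-invariant,
//   ((X >> Y) & 1) == 0   -->   (X & (1 << Y)) == 0
// lets LICM hoist the "1 << Y" computation out of the loop while X varies.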
6833 if (Shift && Shift->hasOneUse() && RHSV == 0 &&
6834 ICI.isEquality() && !Shift->isArithmeticShift() &&
6835 !isa<Constant>(Shift->getOperand(0))) {
6838 if (Shift->getOpcode() == Instruction::LShr) {
6839 NS = Builder->CreateShl(AndCST, Shift->getOperand(1), "tmp");
6841 // Insert a logical shift.
6842 NS = Builder->CreateLShr(AndCST, Shift->getOperand(1), "tmp");
6845 // Compute X & (C << Y).
6847 Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());
6849 ICI.setOperand(0, NewAnd);
6855 case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
6856 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6859 uint32_t TypeBits = RHSV.getBitWidth();
6861 // Check that the shift amount is in range. If not, don't perform
6862 // undefined shifts. When the shift is visited it will be simplified.
6864 if (ShAmt->uge(TypeBits))
6867 if (ICI.isEquality()) {
6868 // If we are comparing against bits always shifted out, the
6869 // comparison cannot succeed.
6871 ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
6873 if (Comp != RHS) {// Comparing against a bit that we know is zero.
6874 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6875 Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
6876 return ReplaceInstUsesWith(ICI, Cst);
6879 if (LHSI->hasOneUse()) {
6880 // Otherwise strength reduce the shift into an and.
6881 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6883 ConstantInt::get(*Context, APInt::getLowBitsSet(TypeBits,
6884 TypeBits-ShAmtVal));
6887 Builder->CreateAnd(LHSI->getOperand(0),Mask, LHSI->getName()+".mask");
6888 return new ICmpInst(ICI.getPredicate(), And,
6889 ConstantInt::get(*Context, RHSV.lshr(ShAmtVal)));
6893 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
6894 bool TrueIfSigned = false;
6895 if (LHSI->hasOneUse() &&
6896 isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
6897 // (X << 31) <s 0 --> (X&1) != 0
6898 Constant *Mask = ConstantInt::get(*Context, APInt(TypeBits, 1) <<
6899 (TypeBits-ShAmt->getZExtValue()-1));
6901 Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
6902 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
6903 And, Constant::getNullValue(And->getType()));
6908 case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
6909 case Instruction::AShr: {
6910 // Only handle equality comparisons of shift-by-constant.
6911 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6912 if (!ShAmt || !ICI.isEquality()) break;
6914 // Check that the shift amount is in range. If not, don't perform
6915 // undefined shifts. When the shift is visited it will be simplified.
6917 uint32_t TypeBits = RHSV.getBitWidth();
6918 if (ShAmt->uge(TypeBits))
6921 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6923 // If we are comparing against bits always shifted out, the
6924 // comparison cannot succeed.
6925 APInt Comp = RHSV << ShAmtVal;
6926 if (LHSI->getOpcode() == Instruction::LShr)
6927 Comp = Comp.lshr(ShAmtVal);
6929 Comp = Comp.ashr(ShAmtVal);
6931 if (Comp != RHSV) { // Comparing against a bit that we know is zero.
6932 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6933 Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
6934 return ReplaceInstUsesWith(ICI, Cst);
6937 // Otherwise, check to see if the bits shifted out are known to be zero.
6938 // If so, we can compare against the unshifted value:
6939 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
6940 if (LHSI->hasOneUse() &&
6941 MaskedValueIsZero(LHSI->getOperand(0),
6942 APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
6943 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
6944 ConstantExpr::getShl(RHS, ShAmt));
6947 if (LHSI->hasOneUse()) {
6948 // Otherwise strength reduce the shift into an and.
6949 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
6950 Constant *Mask = ConstantInt::get(*Context, Val);
6952 Value *And = Builder->CreateAnd(LHSI->getOperand(0),
6953 Mask, LHSI->getName()+".mask");
6954 return new ICmpInst(ICI.getPredicate(), And,
6955 ConstantExpr::getShl(RHS, ShAmt));
6960 case Instruction::SDiv:
6961 case Instruction::UDiv:
6962 // Fold: icmp pred ([us]div X, C1), C2 -> range test
6963 // Fold this div into the comparison, producing a range check.
6964 // Determine, based on the divide type, what range is being
6965 // checked. If there is an overflow on the low or high side, remember
6966 // it, otherwise compute the range [low, hi) bounding the new value.
6967 // See: InsertRangeTest above for the kinds of replacements possible.
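// For example (illustrative only): "icmp ult (udiv X, 10), 3" asks whether
// X/10 is 0, 1 or 2, which is the same as "icmp ult X, 30", so the divide
// disappears from the comparison.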
6968 if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
6969 if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
6974 case Instruction::Add:
6975 // Fold: icmp pred (add X, C1), C2
6977 if (!ICI.isEquality()) {
6978 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6980 const APInt &LHSV = LHSC->getValue();
6982 ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV).subtract(LHSV);
6985 if (ICI.isSignedPredicate()) {
6986 if (CR.getLower().isSignBit()) {
6987 return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
6988 ConstantInt::get(*Context, CR.getUpper()));
6989 } else if (CR.getUpper().isSignBit()) {
6990 return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
6991 ConstantInt::get(*Context, CR.getLower()));
6994 if (CR.getLower().isMinValue()) {
6995 return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
6996 ConstantInt::get(*Context, CR.getUpper()));
6997 } else if (CR.getUpper().isMinValue()) {
6998 return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
6999 ConstantInt::get(*Context, CR.getLower()));
7006 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
7007 if (ICI.isEquality()) {
7008 bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
7010 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
7011 // the second operand is a constant, simplify a bit.
7012 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
7013 switch (BO->getOpcode()) {
7014 case Instruction::SRem:
7015 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
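// e.g. (illustrative): "(X srem 8) == 0" is equivalent to "(X urem 8) == 0",
// since a power-of-two remainder is zero exactly when the low bits of X are
// zero, regardless of sign.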
7016 if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
7017 const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
7018 if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
7020 Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
7022 return new ICmpInst(ICI.getPredicate(), NewRem,
7023 Constant::getNullValue(BO->getType()));
7027 case Instruction::Add:
7028 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
7029 if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7030 if (BO->hasOneUse())
7031 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7032 ConstantExpr::getSub(RHS, BOp1C));
7033 } else if (RHSV == 0) {
7034 // Replace ((add A, B) != 0) with (A != -B) if A or B is
7035 // efficiently invertible, or if the add has just this one use.
7036 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
7038 if (Value *NegVal = dyn_castNegVal(BOp1))
7039 return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
7040 else if (Value *NegVal = dyn_castNegVal(BOp0))
7041 return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
7042 else if (BO->hasOneUse()) {
7043 Value *Neg = Builder->CreateNeg(BOp1);
7045 return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
7049 case Instruction::Xor:
7050 // For the xor case, we can xor two constants together, eliminating
7051 // the explicit xor.
7052 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
7053 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7054 ConstantExpr::getXor(RHS, BOC));
7057 case Instruction::Sub:
7058 // Replace (([sub|xor] A, B) != 0) with (A != B)
7060 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7064 case Instruction::Or:
7065 // If bits are being or'd in that are not present in the constant we
7066 // are comparing against, then the comparison could never succeed!
7067 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
7068 Constant *NotCI = ConstantExpr::getNot(RHS);
7069 if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
7070 return ReplaceInstUsesWith(ICI,
7071 ConstantInt::get(Type::getInt1Ty(*Context),
7076 case Instruction::And:
7077 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7078 // If bits are being compared against that are and'd out, then the
7079 // comparison can never succeed!
7080 if ((RHSV & ~BOC->getValue()) != 0)
7081 return ReplaceInstUsesWith(ICI,
7082 ConstantInt::get(Type::getInt1Ty(*Context),
7085 // If we have ((X & C) == C), turn it into ((X & C) != 0).
7086 if (RHS == BOC && RHSV.isPowerOf2())
7087 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
7088 ICmpInst::ICMP_NE, LHSI,
7089 Constant::getNullValue(RHS->getType()));
7091 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
7092 if (BOC->getValue().isSignBit()) {
7093 Value *X = BO->getOperand(0);
7094 Constant *Zero = Constant::getNullValue(X->getType());
7095 ICmpInst::Predicate pred = isICMP_NE ?
7096 ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
7097 return new ICmpInst(pred, X, Zero);
7100 // ((X & ~7) == 0) --> X < 8
7101 if (RHSV == 0 && isHighOnes(BOC)) {
7102 Value *X = BO->getOperand(0);
7103 Constant *NegX = ConstantExpr::getNeg(BOC);
7104 ICmpInst::Predicate pred = isICMP_NE ?
7105 ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
7106 return new ICmpInst(pred, X, NegX);
7111 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
7112 // Handle icmp {eq|ne} <intrinsic>, intcst.
7113 if (II->getIntrinsicID() == Intrinsic::bswap) {
7115 ICI.setOperand(0, II->getOperand(1));
7116 ICI.setOperand(1, ConstantInt::get(*Context, RHSV.byteSwap()));
7124 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
7125 /// We only handle extending casts so far.
7127 Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
7128 const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
7129 Value *LHSCIOp = LHSCI->getOperand(0);
7130 const Type *SrcTy = LHSCIOp->getType();
7131 const Type *DestTy = LHSCI->getType();
7134 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
7135 // integer type is the same size as the pointer type.
7136 if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
7137 TD->getPointerSizeInBits() ==
7138 cast<IntegerType>(DestTy)->getBitWidth()) {
7140 if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
7141 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
7142 } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
7143 RHSOp = RHSC->getOperand(0);
7144 // If the pointer types don't match, insert a bitcast.
7145 if (LHSCIOp->getType() != RHSOp->getType())
7146 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
7150 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
7153 // The code below only handles extension cast instructions, so far.
7155 if (LHSCI->getOpcode() != Instruction::ZExt &&
7156 LHSCI->getOpcode() != Instruction::SExt)
7159 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
7160 bool isSignedCmp = ICI.isSignedPredicate();
7162 if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
7163 // Not an extension from the same type?
7164 RHSCIOp = CI->getOperand(0);
7165 if (RHSCIOp->getType() != LHSCIOp->getType())
7168 // If the signedness of the two casts doesn't agree (i.e. one is a sext
7169 // and the other is a zext), then we can't handle this.
7170 if (CI->getOpcode() != LHSCI->getOpcode())
7173 // Deal with equality cases early.
7174 if (ICI.isEquality())
7175 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
7177 // A signed comparison of sign extended values simplifies into a
7178 // signed comparison.
7179 if (isSignedCmp && isSignedExt)
7180 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
7182 // The other three cases all fold into an unsigned comparison.
7183 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
7186 // If we aren't dealing with a constant on the RHS, exit early
7187 ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
7191 // Compute the constant that would happen if we truncated to SrcTy then
7192 // reextended to DestTy.
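// For instance (illustrative): with SrcTy = i16, CI = 1330 truncates and
// re-extends back to 1330, so the constant is representable in the narrow
// type; CI = 70000 would come back as 4464 and take the "changed" path below.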
7193 Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
7194 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
7197 // If the re-extended constant didn't change...
7199 // Make sure that sign of the Cmp and the sign of the Cast are the same.
7200 // For example, we might have:
7201 // %A = sext i16 %X to i32
7202 // %B = icmp ugt i32 %A, 1330
7203 // It is incorrect to transform this into
7204 // %B = icmp ugt i16 %X, 1330
7205 // because %A may have a negative value.
7207 // However, we allow this when the compare is EQ/NE, because they are signless.
7209 if (isSignedExt == isSignedCmp || ICI.isEquality())
7210 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
7214 // The re-extended constant changed so the constant cannot be represented
7215 // in the shorter type. Consequently, we cannot emit a simple comparison.
7217 // First, handle some easy cases. We know the result cannot be equal at this
7218 // point so handle the ICI.isEquality() cases
7219 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
7220 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
7221 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
7222 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
7224 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
7225 // should have been folded away previously and should not reach here.
7228 // We're performing a signed comparison.
7229 if (cast<ConstantInt>(CI)->getValue().isNegative())
7230 Result = ConstantInt::getFalse(*Context); // X < (small) --> false
7232 Result = ConstantInt::getTrue(*Context); // X < (large) --> true
7234 // We're performing an unsigned comparison.
7236 // We're performing an unsigned comp with a sign extended value.
7237 // This is true if the input is >= 0. [aka >s -1]
7238 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
7239 Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
7241 // Unsigned extend & unsigned compare -> always true.
7242 Result = ConstantInt::getTrue(*Context);
7246 // Finally, return the value computed.
7247 if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
7248 ICI.getPredicate() == ICmpInst::ICMP_SLT)
7249 return ReplaceInstUsesWith(ICI, Result);
7251 assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
7252 ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
7253 "ICmp should be folded!");
7254 if (Constant *CI = dyn_cast<Constant>(Result))
7255 return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
7256 return BinaryOperator::CreateNot(Result);
7259 Instruction *InstCombiner::visitShl(BinaryOperator &I) {
7260 return commonShiftTransforms(I);
7263 Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
7264 return commonShiftTransforms(I);
7267 Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
7268 if (Instruction *R = commonShiftTransforms(I))
7271 Value *Op0 = I.getOperand(0);
7273 // ashr int -1, X = -1 (for any arithmetic shift rights of ~0)
7274 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
7275 if (CSI->isAllOnesValue())
7276 return ReplaceInstUsesWith(I, CSI);
7278 // See if we can turn a signed shr into an unsigned shr.
7279 if (MaskedValueIsZero(Op0,
7280 APInt::getSignBit(I.getType()->getScalarSizeInBits())))
7281 return BinaryOperator::CreateLShr(Op0, I.getOperand(1));
7283 // Arithmetic shifting an all-sign-bit value is a no-op.
7284 unsigned NumSignBits = ComputeNumSignBits(Op0);
7285 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
7286 return ReplaceInstUsesWith(I, Op0);
7291 Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
7292 assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
7293 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7295 // shl X, 0 == X and shr X, 0 == X
7296 // shl 0, X == 0 and shr 0, X == 0
7297 if (Op1 == Constant::getNullValue(Op1->getType()) ||
7298 Op0 == Constant::getNullValue(Op0->getType()))
7299 return ReplaceInstUsesWith(I, Op0);
7301 if (isa<UndefValue>(Op0)) {
7302 if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
7303 return ReplaceInstUsesWith(I, Op0);
7304 else // undef << X -> 0, undef >>u X -> 0
7305 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7307 if (isa<UndefValue>(Op1)) {
7308 if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
7309 return ReplaceInstUsesWith(I, Op0);
7310 else // X << undef, X >>u undef -> 0
7311 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7314 // See if we can fold away this shift.
7315 if (SimplifyDemandedInstructionBits(I))
7318 // Try to fold constant and into select arguments.
7319 if (isa<Constant>(Op0))
7320 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
7321 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7324 if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
7325 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
7330 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
7331 BinaryOperator &I) {
7332 bool isLeftShift = I.getOpcode() == Instruction::Shl;
7334 // See if we can simplify any instructions used by the instruction whose sole
7335 // purpose is to compute bits we don't care about.
7336 uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
7338 // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate a signed shift.
7341 if (Op1->uge(TypeBits)) {
7342 if (I.getOpcode() != Instruction::AShr)
7343 return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
7345 I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
7350 // ((X*C1) << C2) == (X * (C1 << C2))
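// e.g. (illustrative): (X * 5) << 3 becomes X * 40, folding the shift into
// the multiply constant.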
7351 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
7352 if (BO->getOpcode() == Instruction::Mul && isLeftShift)
7353 if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
7354 return BinaryOperator::CreateMul(BO->getOperand(0),
7355 ConstantExpr::getShl(BOOp, Op1));
7357 // Try to fold constant and into select arguments.
7358 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
7359 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7361 if (isa<PHINode>(Op0))
7362 if (Instruction *NV = FoldOpIntoPhi(I))
7365 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
7366 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
7367 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
7368 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
7369 // place. Don't try to do this transformation in this case. Also, we
7370 // require that the input operand is a shift-by-constant so that we have
7371 // confidence that the shifts will get folded together. We could do this
7372 // xform in more cases, but it is unlikely to be profitable.
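// Rough sketch (illustrative, i32 -> i16): for
//   shl (trunc (lshr X, 8) to i16), 2
// the result is computed in i32 as ((X >>u 8) << 2) & 0x3FFFC and then
// truncated, where the mask models the truncation sitting between the shifts.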
7373 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
7374 isa<ConstantInt>(TrOp->getOperand(1))) {
7375 // Okay, we'll do this xform. Make the shift of shift.
7376 Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
7377 // (shift2 (shift1 & 0x00FF), c2)
7378 Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
7380 // For logical shifts, the truncation has the effect of making the high
7381 // part of the register be zeros. Emulate this by inserting an AND to
7382 // clear the top bits as needed. This 'and' will usually be zapped by
7383 // other xforms later if dead.
7384 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
7385 unsigned DstSize = TI->getType()->getScalarSizeInBits();
7386 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
7388 // The mask we constructed says what the trunc would do if occurring
7389 // between the shifts. We want to know the effect *after* the second
7390 // shift. We know that it is a logical shift by a constant, so adjust the
7391 // mask as appropriate.
7392 if (I.getOpcode() == Instruction::Shl)
7393 MaskV <<= Op1->getZExtValue();
7395 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
7396 MaskV = MaskV.lshr(Op1->getZExtValue());
7400 Value *And = Builder->CreateAnd(NSh, ConstantInt::get(*Context, MaskV),
7403 // Return the value truncated to the interesting size.
7404 return new TruncInst(And, I.getType());
7408 if (Op0->hasOneUse()) {
7409 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
7410 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
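// Concrete instance (illustrative, i32): ((X >> 4) + Y) << 4 becomes
// (X + (Y << 4)) & 0xFFFFFFF0, so the shift of X is replaced by one mask.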
7413 switch (Op0BO->getOpcode()) {
7415 case Instruction::Add:
7416 case Instruction::And:
7417 case Instruction::Or:
7418 case Instruction::Xor: {
7419 // These operators commute.
7420 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
7421 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
7422 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
7423 m_Specific(Op1)))) {
7424 Value *YS = // (Y << C)
7425 Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
7427 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
7428 Op0BO->getOperand(1)->getName());
7429 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
7430 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
7431 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
7434 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
7435 Value *Op0BOOp1 = Op0BO->getOperand(1);
7436 if (isLeftShift && Op0BOOp1->hasOneUse() &&
7438 m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
7439 m_ConstantInt(CC))) &&
7440 cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
7441 Value *YS = // (Y << C)
7442 Builder->CreateShl(Op0BO->getOperand(0), Op1,
7445 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
7446 V1->getName()+".mask");
7447 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
7452 case Instruction::Sub: {
7453 // Turn ((X >> C) - Y) << C -> (X - (Y << C)) & (~0 << C)
7454 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
7455 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
7456 m_Specific(Op1)))) {
7457 Value *YS = // (Y << C)
7458 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
7460 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
7461 Op0BO->getOperand(0)->getName());
7462 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
7463 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
7464 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
7467 // Turn (((X >> C)&CC) - Y) << C -> ((X & (CC << C)) - (Y << C))
7468 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
7469 match(Op0BO->getOperand(0),
7470 m_And(m_Shr(m_Value(V1), m_Value(V2)),
7471 m_ConstantInt(CC))) && V2 == Op1 &&
7472 cast<BinaryOperator>(Op0BO->getOperand(0))
7473 ->getOperand(0)->hasOneUse()) {
7474 Value *YS = // (Y << C)
7475 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
7477 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
7478 V1->getName()+".mask");
7480 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
7488 // If the operand is a bitwise operator with a constant RHS, and the
7489 // shift is the only use, we can pull it out of the shift.
7490 if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
7491 bool isValid = true; // Valid only for And, Or, Xor
7492 bool highBitSet = false; // Transform if high bit of constant set?
7494 switch (Op0BO->getOpcode()) {
7495 default: isValid = false; break; // Do not perform transform!
7496 case Instruction::Add:
7497 isValid = isLeftShift;
7499 case Instruction::Or:
7500 case Instruction::Xor:
7503 case Instruction::And:
7508 // If this is a signed shift right, and the high bit is modified
7509 // by the logical operation, do not perform the transformation.
7510 // The highBitSet boolean indicates the value of the high bit of
7511 // the constant which would cause it to be modified for this operation.
7514 if (isValid && I.getOpcode() == Instruction::AShr)
7515 isValid = Op0C->getValue()[TypeBits-1] == highBitSet;
7518 Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
7521 Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
7522 NewShift->takeName(Op0BO);
7524 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
7531 // Find out if this is a shift of a shift by a constant.
7532 BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
7533 if (ShiftOp && !ShiftOp->isShift())
7536 if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
7537 ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
7538 uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
7539 uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
7540 assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
7541 if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
7542 Value *X = ShiftOp->getOperand(0);
7544 uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
7546 const IntegerType *Ty = cast<IntegerType>(I.getType());
7548 // Check for (X << c1) << c2 and (X >> c1) >> c2
7549 if (I.getOpcode() == ShiftOp->getOpcode()) {
7550 // If this is an oversized composite shift, then unsigned shifts get 0 and ashr saturates.
7552 if (AmtSum >= TypeBits) {
7553 if (I.getOpcode() != Instruction::AShr)
7554 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7555 AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
7558 return BinaryOperator::Create(I.getOpcode(), X,
7559 ConstantInt::get(Ty, AmtSum));
7562 if (ShiftOp->getOpcode() == Instruction::LShr &&
7563 I.getOpcode() == Instruction::AShr) {
7564 if (AmtSum >= TypeBits)
7565 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7567 // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
7568 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
7571 if (ShiftOp->getOpcode() == Instruction::AShr &&
7572 I.getOpcode() == Instruction::LShr) {
7573 // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
7574 if (AmtSum >= TypeBits)
7575 AmtSum = TypeBits-1;
7577 Value *Shift = Builder->CreateAShr(X, ConstantInt::get(Ty, AmtSum));
7579 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7580 return BinaryOperator::CreateAnd(Shift, ConstantInt::get(*Context, Mask));
7583 // Okay, if we get here, one shift must be left, and the other shift must be
7584 // right. See if the amounts are equal.
7585 if (ShiftAmt1 == ShiftAmt2) {
7586 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
7587 if (I.getOpcode() == Instruction::Shl) {
7588 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
7589 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask));
7591 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
7592 if (I.getOpcode() == Instruction::LShr) {
7593 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
7594 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask));
7596 // We can simplify ((X << C) >>s C) into a trunc + sext.
7597 // NOTE: we could do this for any C, but that would make 'unusual' integer
7598 // types. For now, just stick to ones well-supported by the code generators.
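// e.g. (illustrative, i32): (X << 24) >>s 24 is a sign extension of the low
// byte, i.e. sext(trunc X to i8) back to i32.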
7600 const Type *SExtType = 0;
7601 switch (Ty->getBitWidth() - ShiftAmt1) {
7608 SExtType = IntegerType::get(*Context, Ty->getBitWidth() - ShiftAmt1);
7613 return new SExtInst(Builder->CreateTrunc(X, SExtType, "sext"), Ty);
7614 // Otherwise, we can't handle it yet.
7615 } else if (ShiftAmt1 < ShiftAmt2) {
7616 uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
7618 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
7619 if (I.getOpcode() == Instruction::Shl) {
7620 assert(ShiftOp->getOpcode() == Instruction::LShr ||
7621 ShiftOp->getOpcode() == Instruction::AShr);
7622 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
7624 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
7625 return BinaryOperator::CreateAnd(Shift,
7626 ConstantInt::get(*Context, Mask));
7629 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
7630 if (I.getOpcode() == Instruction::LShr) {
7631 assert(ShiftOp->getOpcode() == Instruction::Shl);
7632 Value *Shift = Builder->CreateLShr(X, ConstantInt::get(Ty, ShiftDiff));
7634 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7635 return BinaryOperator::CreateAnd(Shift,
7636 ConstantInt::get(*Context, Mask));
7639 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
7641 assert(ShiftAmt2 < ShiftAmt1);
7642 uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
7644 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
7645 if (I.getOpcode() == Instruction::Shl) {
7646 assert(ShiftOp->getOpcode() == Instruction::LShr ||
7647 ShiftOp->getOpcode() == Instruction::AShr);
7648 Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(), X,
7649 ConstantInt::get(Ty, ShiftDiff));
7651 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
7652 return BinaryOperator::CreateAnd(Shift,
7653 ConstantInt::get(*Context, Mask));
7656 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
7657 if (I.getOpcode() == Instruction::LShr) {
7658 assert(ShiftOp->getOpcode() == Instruction::Shl);
7659 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
7661 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7662 return BinaryOperator::CreateAnd(Shift,
7663 ConstantInt::get(*Context, Mask));
7666 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
7673 /// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
7674 /// expression. If so, decompose it, returning some value X, such that Val is X*Scale+Offset.
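/// For example (illustrative): Val = (X << 2) + 12 decomposes into X with
/// Scale = 4 and Offset = 12, while a lone constant 40 decomposes into 0
/// with Scale = 0 and Offset = 40.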
7677 static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
7678 int &Offset, LLVMContext *Context) {
7679 assert(Val->getType() == Type::getInt32Ty(*Context) &&
7680 "Unexpected allocation size type!");
7681 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
7682 Offset = CI->getZExtValue();
7684 return ConstantInt::get(Type::getInt32Ty(*Context), 0);
7685 } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
7686 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
7687 if (I->getOpcode() == Instruction::Shl) {
7688 // This is a value scaled by '1 << the shift amt'.
7689 Scale = 1U << RHS->getZExtValue();
7691 return I->getOperand(0);
7692 } else if (I->getOpcode() == Instruction::Mul) {
7693 // This value is scaled by 'RHS'.
7694 Scale = RHS->getZExtValue();
7696 return I->getOperand(0);
7697 } else if (I->getOpcode() == Instruction::Add) {
7698 // We have X+C. Check to see if we really have (X*C2)+C1,
7699 // where C1 is divisible by C2.
7702 DecomposeSimpleLinearExpr(I->getOperand(0), SubScale,
7704 Offset += RHS->getZExtValue();
7711 // Otherwise, we can't look past this.
7718 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
7719 /// try to eliminate the cast by moving the type information into the alloc.
7720 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
7721 AllocationInst &AI) {
7722 const PointerType *PTy = cast<PointerType>(CI.getType());
7724 BuilderTy AllocaBuilder(*Builder);
7725 AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
7727 // Remove any uses of AI that are dead.
7728 assert(!CI.use_empty() && "Dead instructions should be removed earlier!");
7730 for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
7731 Instruction *User = cast<Instruction>(*UI++);
7732 if (isInstructionTriviallyDead(User)) {
7733 while (UI != E && *UI == User)
7734 ++UI; // If this instruction uses AI more than once, don't break UI.
7737 DEBUG(errs() << "IC: DCE: " << *User << '\n');
7738 EraseInstFromFunction(*User);
7742 // This requires TargetData to get the alloca alignment and size information.
7745 // Get the type really allocated and the type casted to.
7746 const Type *AllocElTy = AI.getAllocatedType();
7747 const Type *CastElTy = PTy->getElementType();
7748 if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
7750 unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
7751 unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
7752 if (CastElTyAlign < AllocElTyAlign) return 0;
7754 // If the allocation has multiple uses, only promote it if we are strictly
7755 // increasing the alignment of the resultant allocation. If we keep it the
7756 // same, we open the door to infinite loops of various kinds. (A reference
7757 // from a dbg.declare doesn't count as a use for this purpose.)
7758 if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
7759 CastElTyAlign == AllocElTyAlign) return 0;
7761 uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
7762 uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
7763 if (CastElTySize == 0 || AllocElTySize == 0) return 0;
7765 // See if we can satisfy the modulus by pulling a scale out of the array size argument.
7767 unsigned ArraySizeScale;
7769 Value *NumElements = // See if the array size is a decomposable linear expr.
7770 DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale,
7771 ArrayOffset, Context);
7773 // If we can now satisfy the modulus by using a non-1 scale, we really can eliminate the cast.
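// Sketch (illustrative): for "alloca i32, i32 %n" bitcast to i8*, the element
// sizes are 4 and 1, the modulus condition holds with Scale = 4, and the
// allocation can be rewritten as "alloca i8, i32 (4 * %n)".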
7775 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
7776 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
7778 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
7783 Amt = ConstantInt::get(Type::getInt32Ty(*Context), Scale);
7784 // Insert before the alloca, not before the cast.
7785 Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
7788 if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
7789 Value *Off = ConstantInt::get(Type::getInt32Ty(*Context), Offset, true);
7790 Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
7793 AllocationInst *New;
7794 if (isa<MallocInst>(AI))
7795 New = AllocaBuilder.CreateMalloc(CastElTy, Amt);
7797 New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
7798 New->setAlignment(AI.getAlignment());
7801 // If the allocation has one real use plus a dbg.declare, just remove the dbg.declare.
7803 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
7804 EraseInstFromFunction(*DI);
7806 // If the allocation has multiple real uses, insert a cast and change all
7807 // things that used it to use the new cast. This will also hack on CI, but it will die soon.
7809 else if (!AI.hasOneUse()) {
7810 // New is the allocation instruction, pointer typed. AI is the original
7811 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
7812 Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
7813 AI.replaceAllUsesWith(NewCast);
7815 return ReplaceInstUsesWith(CI, New);
7818 /// CanEvaluateInDifferentType - Return true if we can take the specified value
7819 /// and return it as type Ty without inserting any new casts and without
7820 /// changing the computed value. This is used by code that tries to decide
7821 /// whether promoting or shrinking integer operations to wider or smaller types
7822 /// will allow us to eliminate a truncate or extend.
7824 /// This is a truncation operation if Ty is smaller than V->getType(), or an
7825 /// extension operation if Ty is larger.
7827 /// If CastOpc is a truncation, then Ty will be a type smaller than V. We
7828 /// should return true if trunc(V) can be computed by computing V in the smaller
7829 /// type. If V is an instruction, then trunc(inst(x,y)) can be computed as
7830 /// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
7831 /// efficiently truncated.
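/// A minimal illustration (not from the original source): for
///   trunc (add i32 %x, %y) to i8
/// the add can instead be done in i8 on truncated operands, because the
/// discarded high bits never affect the low 8 bits of an add.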
7833 /// If CastOpc is a sext or zext, we are asking if the low bits of the value can
7834 /// be computed in a larger type, which is then and'd or sext_in_reg'd to get
7835 /// the final result.
7836 bool InstCombiner::CanEvaluateInDifferentType(Value *V, const Type *Ty,
7838 int &NumCastsRemoved){
7839 // We can always evaluate constants in another type.
7840 if (isa<Constant>(V))
7843 Instruction *I = dyn_cast<Instruction>(V);
7844 if (!I) return false;
7846 const Type *OrigTy = V->getType();
7848 // If this is an extension or truncate, we can often eliminate it.
7849 if (isa<TruncInst>(I) || isa<ZExtInst>(I) || isa<SExtInst>(I)) {
7850 // If this is a cast from the destination type, we can trivially eliminate
7851 // it, and this will remove a cast overall.
7852 if (I->getOperand(0)->getType() == Ty) {
7853 // If the first operand is itself a cast, and is eliminable, do not count
7854 // this as an eliminable cast. We would prefer to eliminate those two casts.
7856 if (!isa<CastInst>(I->getOperand(0)) && I->hasOneUse())
7862 // We can't extend or shrink something that has multiple uses: doing so would
7863 // require duplicating the instruction in general, which isn't profitable.
7864 if (!I->hasOneUse()) return false;
7866 unsigned Opc = I->getOpcode();
7868 case Instruction::Add:
7869 case Instruction::Sub:
7870 case Instruction::Mul:
7871 case Instruction::And:
7872 case Instruction::Or:
7873 case Instruction::Xor:
7874 // These operators can all arbitrarily be extended or truncated.
7875 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7877 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
7880 case Instruction::UDiv:
7881 case Instruction::URem: {
7882 // UDiv and URem can be truncated if all the truncated bits are zero.
7883 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
7884 uint32_t BitWidth = Ty->getScalarSizeInBits();
7885 if (BitWidth < OrigBitWidth) {
7886 APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
7887 if (MaskedValueIsZero(I->getOperand(0), Mask) &&
7888 MaskedValueIsZero(I->getOperand(1), Mask)) {
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved) &&
               CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
                                          NumCastsRemoved);
7897 case Instruction::Shl:
7898 // If we are truncating the result of this SHL, and if it's a shift of a
7899 // constant amount, we can always perform a SHL in a smaller type.
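    // For example (illustrative): trunc (shl i32 %x, 3) to i16 can be
    // computed as shl i16 (trunc %x to i16), 3.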
7900 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7901 uint32_t BitWidth = Ty->getScalarSizeInBits();
7902 if (BitWidth < OrigTy->getScalarSizeInBits() &&
7903 CI->getLimitedValue(BitWidth) < BitWidth)
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved);
7908 case Instruction::LShr:
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
7912 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7913 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
7914 uint32_t BitWidth = Ty->getScalarSizeInBits();
7915 if (BitWidth < OrigBitWidth &&
7916 MaskedValueIsZero(I->getOperand(0),
7917 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
7918 CI->getLimitedValue(BitWidth) < BitWidth) {
        return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
                                          NumCastsRemoved);
7924 case Instruction::ZExt:
7925 case Instruction::SExt:
7926 case Instruction::Trunc:
7927 // If this is the same kind of case as our original (e.g. zext+zext), we
7928 // can safely replace it. Note that replacing it does not reduce the number
7929 // of casts in the input.
7933 // sext (zext ty1), ty2 -> zext ty2
7934 if (CastOpc == Instruction::SExt && Opc == Instruction::ZExt)
7937 case Instruction::Select: {
7938 SelectInst *SI = cast<SelectInst>(I);
    return CanEvaluateInDifferentType(SI->getTrueValue(), Ty, CastOpc,
                                      NumCastsRemoved) &&
           CanEvaluateInDifferentType(SI->getFalseValue(), Ty, CastOpc,
                                      NumCastsRemoved);
7944 case Instruction::PHI: {
7945 // We can change a phi if we can change all operands.
7946 PHINode *PN = cast<PHINode>(I);
7947 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!CanEvaluateInDifferentType(PN->getIncomingValue(i), Ty, CastOpc,
                                      NumCastsRemoved))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}
7961 /// EvaluateInDifferentType - Given an expression that
7962 /// CanEvaluateInDifferentType returns true for, actually insert the code to
7963 /// evaluate the expression.
Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
                                             bool isSigned) {
7966 if (Constant *C = dyn_cast<Constant>(V))
7967 return ConstantExpr::getIntegerCast(C, Ty,
7968 isSigned /*Sext or ZExt*/);
7970 // Otherwise, it must be an instruction.
7971 Instruction *I = cast<Instruction>(V);
7972 Instruction *Res = 0;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
7975 case Instruction::Add:
7976 case Instruction::Sub:
7977 case Instruction::Mul:
7978 case Instruction::And:
7979 case Instruction::Or:
7980 case Instruction::Xor:
7981 case Instruction::AShr:
7982 case Instruction::LShr:
7983 case Instruction::Shl:
7984 case Instruction::UDiv:
7985 case Instruction::URem: {
7986 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
7987 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
7988 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
7991 case Instruction::Trunc:
7992 case Instruction::ZExt:
7993 case Instruction::SExt:
7994 // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
7997 if (I->getOperand(0)->getType() == Ty)
7998 return I->getOperand(0);
8000 // Otherwise, must be the same type of cast, so just reinsert a new one.
    Res = CastInst::Create(cast<CastInst>(I)->getOpcode(), I->getOperand(0),
                           Ty);
8004 case Instruction::Select: {
8005 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
8006 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
8007 Res = SelectInst::Create(I->getOperand(0), True, False);
8010 case Instruction::PHI: {
8011 PHINode *OPN = cast<PHINode>(I);
8012 PHINode *NPN = PHINode::Create(Ty);
8013 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
8014 Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
8015 NPN->addIncoming(V, OPN->getIncomingBlock(i));
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }
  return InsertNewInstBefore(Res, *I);
}
8030 /// @brief Implement the transforms common to all CastInst visitors.
8031 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
8032 Value *Src = CI.getOperand(0);
8034 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
8035 // eliminate it now.
8036 if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
8037 if (Instruction::CastOps opc =
8038 isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
8039 // The first cast (CSrc) is eliminable so we need to fix up or replace
8040 // the second cast (CI). CSrc will then have a good chance of being dead.
8041 return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
8045 // If we are casting a select then fold the cast into the select
8046 if (SelectInst *SI = dyn_cast<SelectInst>(Src))
8047 if (Instruction *NV = FoldOpIntoSelect(CI, SI, this))
8050 // If we are casting a PHI then fold the cast into the PHI
8051 if (isa<PHINode>(Src))
8052 if (Instruction *NV = FoldOpIntoPhi(CI))
8058 /// FindElementAtOffset - Given a type and a constant offset, determine whether
8059 /// or not there is a sequence of GEP indices into the type that will land us at
8060 /// the specified offset. If so, fill them into NewIndices and return the
8061 /// resultant element type, otherwise return null.
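///
/// For example (illustrative, assuming a 4-byte, naturally aligned i32): for
/// the type { i32, i32 } and Offset 4, this fills in the indices 0, 1 and
/// returns i32.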
8062 static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
8063 SmallVectorImpl<Value*> &NewIndices,
8064 const TargetData *TD,
8065 LLVMContext *Context) {
8067 if (!Ty->isSized()) return 0;
8069 // Start with the index over the outer type. Note that the type size
8070 // might be zero (even if the offset isn't zero) if the indexed type
8071 // is something like [0 x {int, int}]
8072 const Type *IntPtrTy = TD->getIntPtrType(*Context);
8073 int64_t FirstIdx = 0;
8074 if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
8075 FirstIdx = Offset/TySize;
8076 Offset -= FirstIdx*TySize;
8078 // Handle hosts where % returns negative instead of values [0..TySize).
8082 assert(Offset >= 0);
8084 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
8087 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
8089 // Index into the types. If we fail, set OrigBase to null.
8091 // Indexing into tail padding between struct/array elements.
8092 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
8095 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
8096 const StructLayout *SL = TD->getStructLayout(STy);
8097 assert(Offset < (int64_t)SL->getSizeInBytes() &&
8098 "Offset must stay within the indexed type");
8100 unsigned Elt = SL->getElementContainingOffset(Offset);
8101 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Elt));
8103 Offset -= SL->getElementOffset(Elt);
8104 Ty = STy->getElementType(Elt);
8105 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
8106 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
8107 assert(EltSize && "Cannot index into a zero-sized array");
8108 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
8110 Ty = AT->getElementType();
8112 // Otherwise, we can't index into the middle of this atomic type, bail.
8120 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
8121 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
8122 Value *Src = CI.getOperand(0);
8124 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
8125 // If casting the result of a getelementptr instruction with no offset, turn
8126 // this into a cast of the original pointer!
8127 if (GEP->hasAllZeroIndices()) {
8128 // Changing the cast operand is usually not a good idea but it is safe
8129 // here because the pointer operand is being replaced with another
8130 // pointer operand so the opcode doesn't need to change.
8132 CI.setOperand(0, GEP->getOperand(0));
8136 // If the GEP has a single use, and the base pointer is a bitcast, and the
8137 // GEP computes a constant offset, see if we can convert these three
8138 // instructions into fewer. This typically happens with unions and other
8139 // non-type-safe code.
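    // For example (illustrative, assuming 4-byte i32 and invented value names):
    //   %0 = bitcast { i32, i32 }* %s to i8*
    //   %1 = getelementptr i8* %0, i32 4
    //   %2 = bitcast i8* %1 to i32*
    // can collapse into a single getelementptr { i32, i32 }* %s, i32 0, i32 1.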
8140 if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
8141 if (GEP->hasAllConstantIndices()) {
8142 // We are guaranteed to get a constant from EmitGEPOffset.
8143 ConstantInt *OffsetV =
8144 cast<ConstantInt>(EmitGEPOffset(GEP, CI, *this));
8145 int64_t Offset = OffsetV->getSExtValue();
8147 // Get the base pointer input of the bitcast, and the type it points to.
8148 Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
8149 const Type *GEPIdxTy =
8150 cast<PointerType>(OrigBase->getType())->getElementType();
8151 SmallVector<Value*, 8> NewIndices;
8152 if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD, Context)) {
8153 // If we were able to index down into an element, create the GEP
          // and bitcast the result.  This eliminates one bitcast, potentially
          // two.
8156 Value *NGEP = cast<GEPOperator>(GEP)->isInBounds() ?
8157 Builder->CreateInBoundsGEP(OrigBase,
8158 NewIndices.begin(), NewIndices.end()) :
8159 Builder->CreateGEP(OrigBase, NewIndices.begin(), NewIndices.end());
8160 NGEP->takeName(GEP);
8162 if (isa<BitCastInst>(CI))
8163 return new BitCastInst(NGEP, CI.getType());
8164 assert(isa<PtrToIntInst>(CI));
8165 return new PtrToIntInst(NGEP, CI.getType());
8171 return commonCastTransforms(CI);
8174 /// isSafeIntegerType - Return true if this is a basic integer type, not a crazy
8175 /// type like i42. We don't want to introduce operations on random non-legal
8176 /// integer types where they don't already exist in the code. In the future,
8177 /// we should consider making this based off target-data, so that 32-bit targets
8178 /// won't get i64 operations etc.
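/// (Illustratively, ordinary widths such as i8, i16, i32 and i64 are the
/// "basic" types meant here, while an arbitrary width like i42 is not.)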
8179 static bool isSafeIntegerType(const Type *Ty) {
8180 switch (Ty->getPrimitiveSizeInBits()) {
8191 /// commonIntCastTransforms - This function implements the common transforms
8192 /// for trunc, zext, and sext.
8193 Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
8194 if (Instruction *Result = commonCastTransforms(CI))
8197 Value *Src = CI.getOperand(0);
8198 const Type *SrcTy = Src->getType();
8199 const Type *DestTy = CI.getType();
8200 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
8201 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
8203 // See if we can simplify any instructions used by the LHS whose sole
8204 // purpose is to compute bits we don't care about.
8205 if (SimplifyDemandedInstructionBits(CI))
8208 // If the source isn't an instruction or has more than one use then we
8209 // can't do anything more.
8210 Instruction *SrcI = dyn_cast<Instruction>(Src);
8211 if (!SrcI || !Src->hasOneUse())
8214 // Attempt to propagate the cast into the instruction for int->int casts.
8215 int NumCastsRemoved = 0;
8216 // Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
8219 if ((isSafeIntegerType(DestTy->getScalarType()) ||
8220 !isSafeIntegerType(SrcI->getType()->getScalarType())) &&
8221 CanEvaluateInDifferentType(SrcI, DestTy,
8222 CI.getOpcode(), NumCastsRemoved)) {
    // If this cast is a truncate, evaluating in a different type always
8224 // eliminates the cast, so it is always a win. If this is a zero-extension,
8225 // we need to do an AND to maintain the clear top-part of the computation,
8226 // so we require that the input have eliminated at least one cast. If this
8227 // is a sign extension, we insert two new casts (to do the extension) so we
8228 // require that two casts have been eliminated.
8229 bool DoXForm = false;
8230 bool JustReplace = false;
8231 switch (CI.getOpcode()) {
8233 // All the others use floating point so we shouldn't actually
8234 // get here because of the check above.
8235 llvm_unreachable("Unknown cast type");
    case Instruction::Trunc:
      DoXForm = true;
      break;
8239 case Instruction::ZExt: {
8240 DoXForm = NumCastsRemoved >= 1;
8241 if (!DoXForm && 0) {
8242 // If it's unnecessary to issue an AND to clear the high bits, it's
8243 // always profitable to do this xform.
8244 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, false);
8245 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8246 if (MaskedValueIsZero(TryRes, Mask))
8247 return ReplaceInstUsesWith(CI, TryRes);
8249 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8250 if (TryI->use_empty())
8251 EraseInstFromFunction(*TryI);
8255 case Instruction::SExt: {
8256 DoXForm = NumCastsRemoved >= 2;
8257 if (!DoXForm && !isa<TruncInst>(SrcI) && 0) {
8258 // If we do not have to emit the truncate + sext pair, then it's always
8259 // profitable to do this xform.
      // It's not safe to eliminate the trunc + sext pair if one of the
      // eliminated casts is a truncate. e.g.
8263 // t2 = trunc i32 t1 to i16
8264 // t3 = sext i16 t2 to i32
8267 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, true);
8268 unsigned NumSignBits = ComputeNumSignBits(TryRes);
8269 if (NumSignBits > (DestBitSize - SrcBitSize))
8270 return ReplaceInstUsesWith(CI, TryRes);
8272 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8273 if (TryI->use_empty())
8274 EraseInstFromFunction(*TryI);
8281 DEBUG(errs() << "ICE: EvaluateInDifferentType converting expression type"
8282 " to avoid cast: " << CI);
8283 Value *Res = EvaluateInDifferentType(SrcI, DestTy,
8284 CI.getOpcode() == Instruction::SExt);
      if (JustReplace)
        // Just replace this cast with the result.
        return ReplaceInstUsesWith(CI, Res);
8289 assert(Res->getType() == DestTy);
8290 switch (CI.getOpcode()) {
8291 default: llvm_unreachable("Unknown cast type!");
8292 case Instruction::Trunc:
8293 // Just replace this cast with the result.
8294 return ReplaceInstUsesWith(CI, Res);
8295 case Instruction::ZExt: {
8296 assert(SrcBitSize < DestBitSize && "Not a zext?");
      // If the high bits are already zero, just replace this cast with the
      // result.
8300 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8301 if (MaskedValueIsZero(Res, Mask))
8302 return ReplaceInstUsesWith(CI, Res);
8304 // We need to emit an AND to clear the high bits.
8305 Constant *C = ConstantInt::get(*Context,
8306 APInt::getLowBitsSet(DestBitSize, SrcBitSize));
8307 return BinaryOperator::CreateAnd(Res, C);
8309 case Instruction::SExt: {
      // If the high bits are already filled with the sign bit, just replace this
8311 // cast with the result.
8312 unsigned NumSignBits = ComputeNumSignBits(Res);
8313 if (NumSignBits > (DestBitSize - SrcBitSize))
8314 return ReplaceInstUsesWith(CI, Res);
8316 // We need to emit a cast to truncate, then a cast to sext.
8317 return new SExtInst(Builder->CreateTrunc(Res, Src->getType()), DestTy);
8323 Value *Op0 = SrcI->getNumOperands() > 0 ? SrcI->getOperand(0) : 0;
8324 Value *Op1 = SrcI->getNumOperands() > 1 ? SrcI->getOperand(1) : 0;
8326 switch (SrcI->getOpcode()) {
8327 case Instruction::Add:
8328 case Instruction::Mul:
8329 case Instruction::And:
8330 case Instruction::Or:
8331 case Instruction::Xor:
8332 // If we are discarding information, rewrite.
8333 if (DestBitSize < SrcBitSize && DestBitSize != 1) {
8334 // Don't insert two casts unless at least one can be eliminated.
8335 if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) ||
8336 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
8337 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
8338 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
8339 return BinaryOperator::Create(
8340 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
8344 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
8345 if (isa<ZExtInst>(CI) && SrcBitSize == 1 &&
8346 SrcI->getOpcode() == Instruction::Xor &&
8347 Op1 == ConstantInt::getTrue(*Context) &&
8348 (!Op0->hasOneUse() || !isa<CmpInst>(Op0))) {
8349 Value *New = Builder->CreateZExt(Op0, DestTy, Op0->getName());
8350 return BinaryOperator::CreateXor(New,
8351 ConstantInt::get(CI.getType(), 1));
8355 case Instruction::Shl: {
8356 // Canonicalize trunc inside shl, if we can.
8357 ConstantInt *CI = dyn_cast<ConstantInt>(Op1);
8358 if (CI && DestBitSize < SrcBitSize &&
8359 CI->getLimitedValue(DestBitSize) < DestBitSize) {
8360 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
8361 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
8362 return BinaryOperator::CreateShl(Op0c, Op1c);
8370 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
8371 if (Instruction *Result = commonIntCastTransforms(CI))
8374 Value *Src = CI.getOperand(0);
8375 const Type *Ty = CI.getType();
8376 uint32_t DestBitWidth = Ty->getScalarSizeInBits();
8377 uint32_t SrcBitWidth = Src->getType()->getScalarSizeInBits();
8379 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
8380 if (DestBitWidth == 1) {
8381 Constant *One = ConstantInt::get(Src->getType(), 1);
8382 Src = Builder->CreateAnd(Src, One, "tmp");
8383 Value *Zero = Constant::getNullValue(Src->getType());
8384 return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
8387 // Optimize trunc(lshr(), c) to pull the shift through the truncate.
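  // For example (illustrative): trunc (lshr i32 %x, 2) to i16 can become
  // lshr (trunc i32 %x to i16), 2 when bits 16 and 17 of %x are known zero;
  // if the shift amount is >= the destination width, the result is just 0.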
  ConstantInt *ShAmtV = 0;
  Value *ShiftOp = 0;
8390 if (Src->hasOneUse() &&
8391 match(Src, m_LShr(m_Value(ShiftOp), m_ConstantInt(ShAmtV)))) {
8392 uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth);
8394 // Get a mask for the bits shifting in.
8395 APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth));
8396 if (MaskedValueIsZero(ShiftOp, Mask)) {
8397 if (ShAmt >= DestBitWidth) // All zeros.
8398 return ReplaceInstUsesWith(CI, Constant::getNullValue(Ty));
      // Okay, we can shrink this.  Truncate the input, then return a new
      // shift.
8402 Value *V1 = Builder->CreateTrunc(ShiftOp, Ty, ShiftOp->getName());
8403 Value *V2 = ConstantExpr::getTrunc(ShAmtV, Ty);
8404 return BinaryOperator::CreateLShr(V1, V2);
8411 /// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
8412 /// in order to eliminate the icmp.
8413 Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
  // If we are just checking for an icmp eq of a single bit and zext'ing it
8416 // to an integer, then shift the bit to the appropriate place and then
8417 // cast to integer to avoid the comparison.
8418 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
8419 const APInt &Op1CV = Op1C->getValue();
8421 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
8422 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
8423 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
8424 (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())) {
8425 if (!DoXform) return ICI;
8427 Value *In = ICI->getOperand(0);
8428 Value *Sh = ConstantInt::get(In->getType(),
8429 In->getType()->getScalarSizeInBits()-1);
8430 In = Builder->CreateLShr(In, Sh, In->getName()+".lobit");
8431 if (In->getType() != CI.getType())
8432 In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/, "tmp");
8434 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
8435 Constant *One = ConstantInt::get(In->getType(), 1);
8436 In = Builder->CreateXor(In, One, In->getName()+".not");
8439 return ReplaceInstUsesWith(CI, In);
    // zext (X == 0) to i32 --> X^1        iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1   iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X          iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1       iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X          iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1       iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1        iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1   iff X has only the 2nd bit set.
8452 if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
8453 // This only works for EQ and NE
8454 ICI->isEquality()) {
      // If Op1C is some other power of two, convert:
8456 uint32_t BitWidth = Op1C->getType()->getBitWidth();
8457 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
8458 APInt TypeMask(APInt::getAllOnesValue(BitWidth));
8459 ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
8461 APInt KnownZeroMask(~KnownZero);
8462 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
8463 if (!DoXform) return ICI;
8465 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
8466 if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
8467 // (X&4) == 2 --> false
8468 // (X&4) != 2 --> true
8469 Constant *Res = ConstantInt::get(Type::getInt1Ty(*Context), isNE);
8470 Res = ConstantExpr::getZExt(Res, CI.getType());
8471 return ReplaceInstUsesWith(CI, Res);
8474 uint32_t ShiftAmt = KnownZeroMask.logBase2();
8475 Value *In = ICI->getOperand(0);
8477 // Perform a logical shr by shiftamt.
8478 // Insert the shift to put the result in the low bit.
8479 In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
8480 In->getName()+".lobit");
8483 if ((Op1CV != 0) == isNE) { // Toggle the low bit.
8484 Constant *One = ConstantInt::get(In->getType(), 1);
8485 In = Builder->CreateXor(In, One, "tmp");
8488 if (CI.getType() == In->getType())
8489 return ReplaceInstUsesWith(CI, In);
8491 return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
8499 Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If one of the common conversions will work, use it.
8501 if (Instruction *Result = commonIntCastTransforms(CI))
8504 Value *Src = CI.getOperand(0);
8506 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
8507 // types and if the sizes are just right we can convert this into a logical
8508 // 'and' which will be much cheaper than the pair of casts.
8509 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
8510 // Get the sizes of the types involved. We know that the intermediate type
8511 // will be smaller than A or C, but don't know the relation between A and C.
8512 Value *A = CSrc->getOperand(0);
8513 unsigned SrcSize = A->getType()->getScalarSizeInBits();
8514 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
8515 unsigned DstSize = CI.getType()->getScalarSizeInBits();
8516 // If we're actually extending zero bits, then if
8517 // SrcSize < DstSize: zext(a & mask)
8518 // SrcSize == DstSize: a & mask
8519 // SrcSize > DstSize: trunc(a) & mask
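    // For example (illustrative): zext (trunc i32 %a to i8) to i16
    //   --> and (trunc i32 %a to i16), 255   (the SrcSize > DstSize case).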
8520 if (SrcSize < DstSize) {
8521 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
8522 Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
8523 Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
8524 return new ZExtInst(And, CI.getType());
8527 if (SrcSize == DstSize) {
8528 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
8532 if (SrcSize > DstSize) {
8533 Value *Trunc = Builder->CreateTrunc(A, CI.getType(), "tmp");
8534 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
8541 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
8542 return transformZExtICmp(ICI, CI);
8544 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
8545 if (SrcI && SrcI->getOpcode() == Instruction::Or) {
8546 // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
8547 // of the (zext icmp) will be transformed.
8548 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
8549 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
8550 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
8551 (transformZExtICmp(LHS, CI, false) ||
8552 transformZExtICmp(RHS, CI, false))) {
8553 Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
8554 Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
8555 return BinaryOperator::Create(Instruction::Or, LCast, RCast);
8559 // zext(trunc(t) & C) -> (t & zext(C)).
8560 if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
8561 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
8562 if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
8563 Value *TI0 = TI->getOperand(0);
8564 if (TI0->getType() == CI.getType())
          return BinaryOperator::CreateAnd(TI0,
8567 ConstantExpr::getZExt(C, CI.getType()));
8570 // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
8571 if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
8572 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
8573 if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
8574 if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
8575 And->getOperand(1) == C)
8576 if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
8577 Value *TI0 = TI->getOperand(0);
8578 if (TI0->getType() == CI.getType()) {
8579 Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
8580 Value *NewAnd = Builder->CreateAnd(TI0, ZC, "tmp");
8581 return BinaryOperator::CreateXor(NewAnd, ZC);
8588 Instruction *InstCombiner::visitSExt(SExtInst &CI) {
8589 if (Instruction *I = commonIntCastTransforms(CI))
8592 Value *Src = CI.getOperand(0);
8594 // Canonicalize sign-extend from i1 to a select.
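  // For example (illustrative): sext i1 %b to i32 becomes
  // select i1 %b, i32 -1, i32 0.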
8595 if (Src->getType() == Type::getInt1Ty(*Context))
8596 return SelectInst::Create(Src,
8597 Constant::getAllOnesValue(CI.getType()),
8598 Constant::getNullValue(CI.getType()));
8600 // See if the value being truncated is already sign extended. If so, just
8601 // eliminate the trunc/sext pair.
8602 if (Operator::getOpcode(Src) == Instruction::Trunc) {
8603 Value *Op = cast<User>(Src)->getOperand(0);
8604 unsigned OpBits = Op->getType()->getScalarSizeInBits();
8605 unsigned MidBits = Src->getType()->getScalarSizeInBits();
8606 unsigned DestBits = CI.getType()->getScalarSizeInBits();
8607 unsigned NumSignBits = ComputeNumSignBits(Op);
8609 if (OpBits == DestBits) {
8610 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
8611 // bits, it is already ready.
8612 if (NumSignBits > DestBits-MidBits)
8613 return ReplaceInstUsesWith(CI, Op);
8614 } else if (OpBits < DestBits) {
8615 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
8616 // bits, just sext from i32.
8617 if (NumSignBits > OpBits-MidBits)
8618 return new SExtInst(Op, CI.getType(), "tmp");
8620 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
8621 // bits, just truncate to i32.
8622 if (NumSignBits > OpBits-MidBits)
8623 return new TruncInst(Op, CI.getType(), "tmp");
8627 // If the input is a shl/ashr pair of a same constant, then this is a sign
8628 // extension from a smaller value. If we could trust arbitrary bitwidth
8629 // integers, we could turn this into a truncate to the smaller bit and then
8630 // use a sext for the whole extension. Since we don't, look deeper and check
8631 // for a truncate. If the source and dest are the same type, eliminate the
8632 // trunc and extend and just do shifts. For example, turn:
8633 // %a = trunc i32 %i to i8
8634 // %b = shl i8 %a, 6
8635 // %c = ashr i8 %b, 6
8636 // %d = sext i8 %c to i32
8638 // %a = shl i32 %i, 30
8639 // %d = ashr i32 %a, 30
  Value *A = 0;
  ConstantInt *BA = 0, *CA = 0;
8642 if (match(Src, m_AShr(m_Shl(m_Value(A), m_ConstantInt(BA)),
8643 m_ConstantInt(CA))) &&
8644 BA == CA && isa<TruncInst>(A)) {
8645 Value *I = cast<TruncInst>(A)->getOperand(0);
8646 if (I->getType() == CI.getType()) {
8647 unsigned MidSize = Src->getType()->getScalarSizeInBits();
8648 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
8649 unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
8650 Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
8651 I = Builder->CreateShl(I, ShAmtV, CI.getName());
8652 return BinaryOperator::CreateAShr(I, ShAmtV);
8659 /// FitsInFPType - Return a Constant* for the specified FP constant if it fits
8660 /// in the specified FP type without changing its value.
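///
/// For example (illustrative): the double constant 2.5 round-trips through
/// float with no loss, so a float constant can be returned; 1.0e300 does not
/// fit in float, so nothing is returned.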
8661 static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
8662 LLVMContext *Context) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
8665 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  if (!losesInfo)
    return ConstantFP::get(*Context, F);
  return 0;
}
8671 /// LookThroughFPExtensions - If this is an fp extension instruction, look
8672 /// through it until we get the source value.
8673 static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
8674 if (Instruction *I = dyn_cast<Instruction>(V))
8675 if (I->getOpcode() == Instruction::FPExt)
8676 return LookThroughFPExtensions(I->getOperand(0), Context);
8678 // If this value is a constant, return the constant in the smallest FP type
8679 // that can accurately represent it. This allows us to turn
8680 // (float)((double)X+2.0) into x+2.0f.
8681 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
8682 if (CFP->getType() == Type::getPPC_FP128Ty(*Context))
8683 return V; // No constant folding of this.
8684 // See if the value can be truncated to float and then reextended.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context))
      return V;
8687 if (CFP->getType() == Type::getDoubleTy(*Context))
8688 return V; // Won't shrink.
    if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context))
      return V;
8691 // Don't try to shrink to various long double types.
8697 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
8698 if (Instruction *I = commonCastTransforms(CI))
8701 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
8702 // smaller than the destination type, we can eliminate the truncate by doing
8703 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as
8704 // many builtins (sqrt, etc).
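  // For example (illustrative, with invented value names):
  //   %x.ext = fpext float %x to double
  //   %y.ext = fpext float %y to double
  //   %s = fadd double %x.ext, %y.ext
  //   %r = fptrunc double %s to float
  // can be performed as a single fadd on float.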
8705 BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
8706 if (OpI && OpI->hasOneUse()) {
8707 switch (OpI->getOpcode()) {
8709 case Instruction::FAdd:
8710 case Instruction::FSub:
8711 case Instruction::FMul:
8712 case Instruction::FDiv:
8713 case Instruction::FRem:
8714 const Type *SrcTy = OpI->getType();
8715 Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0), Context);
8716 Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1), Context);
8717 if (LHSTrunc->getType() != SrcTy &&
8718 RHSTrunc->getType() != SrcTy) {
8719 unsigned DstSize = CI.getType()->getScalarSizeInBits();
8720 // If the source types were both smaller than the destination type of
8721 // the cast, do this xform.
8722 if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
8723 RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
8724 LHSTrunc = Builder->CreateFPExt(LHSTrunc, CI.getType());
8725 RHSTrunc = Builder->CreateFPExt(RHSTrunc, CI.getType());
8726 return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
8735 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
8736 return commonCastTransforms(CI);
8739 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
8740 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
8742 return commonCastTransforms(FI);
8744 // fptoui(uitofp(X)) --> X
8745 // fptoui(sitofp(X)) --> X
8746 // This is safe if the intermediate type has enough bits in its mantissa to
8747 // accurately represent all values of X. For example, do not do this with
  // i64->float->i64. This is also safe for the sitofp case, because any
  // negative 'X' value would cause an undefined result for the fptoui.
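  // For example (illustrative): fptoui (uitofp i16 %x to float) to i16 --> %x,
  // since float's 24-bit mantissa represents every i16 value exactly.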
8750 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
8751 OpI->getOperand(0)->getType() == FI.getType() &&
8752 (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
8753 OpI->getType()->getFPMantissaWidth())
8754 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
8756 return commonCastTransforms(FI);
8759 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
8760 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
8762 return commonCastTransforms(FI);
8764 // fptosi(sitofp(X)) --> X
8765 // fptosi(uitofp(X)) --> X
8766 // This is safe if the intermediate type has enough bits in its mantissa to
8767 // accurately represent all values of X. For example, do not do this with
  // i64->float->i64. This is also safe for the uitofp case, because an 'X'
  // value too large for the signed result would make the fptosi undefined
  // anyway.
8770 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
8771 OpI->getOperand(0)->getType() == FI.getType() &&
8772 (int)FI.getType()->getScalarSizeInBits() <=
8773 OpI->getType()->getFPMantissaWidth())
8774 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
8776 return commonCastTransforms(FI);
8779 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
8780 return commonCastTransforms(CI);
8783 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
8784 return commonCastTransforms(CI);
8787 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
8788 // If the destination integer type is smaller than the intptr_t type for
8789 // this target, do a ptrtoint to intptr_t then do a trunc. This allows the
8790 // trunc to be exposed to other transforms. Don't do this for extending
  // ptrtoint's, because we don't know if the target sign or zero extends its
  // pointers.
  if (TD &&
      CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
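    // For example (illustrative, assuming 64-bit pointers):
    // ptrtoint i8* %p to i32 becomes trunc (ptrtoint i8* %p to i64) to i32.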
8795 Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
8796 TD->getIntPtrType(CI.getContext()),
8798 return new TruncInst(P, CI.getType());
8801 return commonPointerCastTransforms(CI);
8804 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
8805 // If the source integer type is larger than the intptr_t type for
8806 // this target, do a trunc to the intptr_t type, then inttoptr of it. This
8807 // allows the trunc to be exposed to other transforms. Don't do this for
8808 // extending inttoptr's, because we don't know if the target sign or zero
8809 // extends to pointers.
8810 if (TD && CI.getOperand(0)->getType()->getScalarSizeInBits() >
8811 TD->getPointerSizeInBits()) {
8812 Value *P = Builder->CreateTrunc(CI.getOperand(0),
8813 TD->getIntPtrType(CI.getContext()), "tmp");
8814 return new IntToPtrInst(P, CI.getType());
8817 if (Instruction *I = commonCastTransforms(CI))
8823 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
8824 // If the operands are integer typed then apply the integer transforms,
8825 // otherwise just apply the common ones.
8826 Value *Src = CI.getOperand(0);
8827 const Type *SrcTy = Src->getType();
8828 const Type *DestTy = CI.getType();
8830 if (isa<PointerType>(SrcTy)) {
8831 if (Instruction *I = commonPointerCastTransforms(CI))
8834 if (Instruction *Result = commonCastTransforms(CI))
8839 // Get rid of casts from one type to the same type. These are useless and can
8840 // be replaced by the operand.
8841 if (DestTy == Src->getType())
8842 return ReplaceInstUsesWith(CI, Src);
8844 if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
8845 const PointerType *SrcPTy = cast<PointerType>(SrcTy);
8846 const Type *DstElTy = DstPTy->getElementType();
8847 const Type *SrcElTy = SrcPTy->getElementType();
8849 // If the address spaces don't match, don't eliminate the bitcast, which is
8850 // required for changing types.
8851 if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
    // If we are casting an alloca to a pointer to a type of the same
8855 // size, rewrite the allocation instruction to allocate the "right" type.
8856 // There is no need to modify malloc calls because it is their bitcast that
8857 // needs to be cleaned up.
8858 if (AllocationInst *AI = dyn_cast<AllocationInst>(Src))
8859 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
8862 // If the source and destination are pointers, and this cast is equivalent
8863 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
8864 // This can enhance SROA and other transforms that want type-safe pointers.
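    // For example (illustrative): bitcast { [2 x i32] }* %p to i32* can become
    // getelementptr inbounds { [2 x i32] }* %p, i32 0, i32 0, i32 0.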
8865 Constant *ZeroUInt = Constant::getNullValue(Type::getInt32Ty(*Context));
8866 unsigned NumZeros = 0;
8867 while (SrcElTy != DstElTy &&
8868 isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
8869 SrcElTy->getNumContainedTypes() /* not "{}" */) {
8870 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
8874 // If we found a path from the src to dest, create the getelementptr now.
8875 if (SrcElTy == DstElTy) {
8876 SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
8877 return GetElementPtrInst::CreateInBounds(Src, Idxs.begin(), Idxs.end(), "",
8878 ((Instruction*) NULL));
8882 if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
8883 if (DestVTy->getNumElements() == 1) {
8884 if (!isa<VectorType>(SrcTy)) {
8885 Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
8886 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
8887 Constant::getNullValue(Type::getInt32Ty(*Context)));
8889 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
8893 if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
8894 if (SrcVTy->getNumElements() == 1) {
8895 if (!isa<VectorType>(DestTy)) {
        Value *Elem = Builder->CreateExtractElement(Src,
8898 Constant::getNullValue(Type::getInt32Ty(*Context)));
8899 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
8904 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
8905 if (SVI->hasOneUse()) {
8906 // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
8907 // a bitconvert to a vector with the same # elts.
8908 if (isa<VectorType>(DestTy) &&
8909 cast<VectorType>(DestTy)->getNumElements() ==
8910 SVI->getType()->getNumElements() &&
8911 SVI->getType()->getNumElements() ==
          cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements()) {
        CastInst *Tmp;
        // If either of the operands is a cast from CI.getType(), then
8915 // evaluating the shuffle in the casted destination's type will allow
8916 // us to eliminate at least one cast.
8917 if (((Tmp = dyn_cast<CastInst>(SVI->getOperand(0))) &&
8918 Tmp->getOperand(0)->getType() == DestTy) ||
8919 ((Tmp = dyn_cast<CastInst>(SVI->getOperand(1))) &&
8920 Tmp->getOperand(0)->getType() == DestTy)) {
8921 Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
8922 Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
8923 // Return a new shuffle vector. Use the same element ID's, as we
8924 // know the vector types match #elts.
8925 return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
/// GetSelectFoldableOperands - We want to turn code that looks like this:
///   %C = or %A, %B
///   %D = select %cond, %C, %A
/// into:
///   %C = select %cond, %B, 0
///   %D = or %A, %C
8940 /// Assuming that the specified instruction is an operand to the select, return
8941 /// a bitmask indicating which operands of this instruction are foldable if they
8942 /// equal the other incoming value of the select.
8944 static unsigned GetSelectFoldableOperands(Instruction *I) {
8945 switch (I->getOpcode()) {
8946 case Instruction::Add:
8947 case Instruction::Mul:
8948 case Instruction::And:
8949 case Instruction::Or:
8950 case Instruction::Xor:
8951 return 3; // Can fold through either operand.
8952 case Instruction::Sub: // Can only fold on the amount subtracted.
8953 case Instruction::Shl: // Can only fold on the shift amount.
8954 case Instruction::LShr:
  case Instruction::AShr:
    return 2; // Can only fold through the second operand.
  default:
    return 0; // Cannot fold.
8962 /// GetSelectFoldableConstant - For the same transformation as the previous
8963 /// function, return the identity constant that goes into the select.
8964 static Constant *GetSelectFoldableConstant(Instruction *I,
8965 LLVMContext *Context) {
8966 switch (I->getOpcode()) {
8967 default: llvm_unreachable("This cannot happen!");
8968 case Instruction::Add:
8969 case Instruction::Sub:
8970 case Instruction::Or:
8971 case Instruction::Xor:
8972 case Instruction::Shl:
8973 case Instruction::LShr:
8974 case Instruction::AShr:
8975 return Constant::getNullValue(I->getType());
8976 case Instruction::And:
8977 return Constant::getAllOnesValue(I->getType());
8978 case Instruction::Mul:
8979 return ConstantInt::get(I->getType(), 1);
8983 /// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
8984 /// have the same opcode and only one use each. Try to simplify this.
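///
/// For example (illustrative): select %c, (add %X, %Y), (add %X, %Z)
/// can become add %X, (select %c, %Y, %Z).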
8985 Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
8987 if (TI->getNumOperands() == 1) {
    // If this is a non-volatile load or a cast from the same type, we may be
    // able to fold it by selecting between the two input values.
8991 if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
8994 return 0; // unknown unary op.
8997 // Fold this by inserting a select from the input values.
8998 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
8999 FI->getOperand(0), SI.getName()+".v");
9000 InsertNewInstBefore(NewSI, SI);
9001 return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
9005 // Only handle binary operators here.
9006 if (!isa<BinaryOperator>(TI))
9009 // Figure out if the operations have any operands in common.
9010 Value *MatchOp, *OtherOpT, *OtherOpF;
9012 if (TI->getOperand(0) == FI->getOperand(0)) {
9013 MatchOp = TI->getOperand(0);
9014 OtherOpT = TI->getOperand(1);
9015 OtherOpF = FI->getOperand(1);
9016 MatchIsOpZero = true;
9017 } else if (TI->getOperand(1) == FI->getOperand(1)) {
9018 MatchOp = TI->getOperand(1);
9019 OtherOpT = TI->getOperand(0);
9020 OtherOpF = FI->getOperand(0);
9021 MatchIsOpZero = false;
9022 } else if (!TI->isCommutative()) {
9024 } else if (TI->getOperand(0) == FI->getOperand(1)) {
9025 MatchOp = TI->getOperand(0);
9026 OtherOpT = TI->getOperand(1);
9027 OtherOpF = FI->getOperand(0);
9028 MatchIsOpZero = true;
9029 } else if (TI->getOperand(1) == FI->getOperand(0)) {
9030 MatchOp = TI->getOperand(1);
9031 OtherOpT = TI->getOperand(0);
9032 OtherOpF = FI->getOperand(1);
9033 MatchIsOpZero = true;
9038 // If we reach here, they do have operations in common.
9039 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), OtherOpT,
9040 OtherOpF, SI.getName()+".v");
9041 InsertNewInstBefore(NewSI, SI);
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TI)) {
    if (MatchIsOpZero)
      return BinaryOperator::Create(BO->getOpcode(), MatchOp, NewSI);
    else
      return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp);
  }
9049 llvm_unreachable("Shouldn't get here");
9053 static bool isSelect01(Constant *C1, Constant *C2) {
  ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
  if (!C1I)
    return false;
  ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
  if (!C2I)
    return false;
  return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
}
/// FoldSelectIntoOp - Try to fold the select into one of the operands to
9064 /// facilitate further optimization.
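///
/// For example (illustrative): select %c, (or %A, %B), %A can become
/// or %A, (select %c, %B, 0), since or's identity value 0 reproduces %A on
/// the false edge.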
9065 Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
9067 // See the comment above GetSelectFoldableOperands for a description of the
9068 // transformation we are doing here.
9069 if (Instruction *TVI = dyn_cast<Instruction>(TrueVal)) {
9070 if (TVI->hasOneUse() && TVI->getNumOperands() == 2 &&
9071 !isa<Constant>(FalseVal)) {
9072 if (unsigned SFO = GetSelectFoldableOperands(TVI)) {
9073 unsigned OpToFold = 0;
        if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
          OpToFold = 1;
        } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
          OpToFold = 2;
        }

        if (OpToFold) {
9081 Constant *C = GetSelectFoldableConstant(TVI, Context);
9082 Value *OOp = TVI->getOperand(2-OpToFold);
          // Avoid creating a select between two constants unless it's
          // selecting between 0 and 1 (see isSelect01).
9085 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9086 Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C);
9087 InsertNewInstBefore(NewSel, SI);
9088 NewSel->takeName(TVI);
9089 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
9090 return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel);
9091 llvm_unreachable("Unknown instruction!!");
9098 if (Instruction *FVI = dyn_cast<Instruction>(FalseVal)) {
9099 if (FVI->hasOneUse() && FVI->getNumOperands() == 2 &&
9100 !isa<Constant>(TrueVal)) {
9101 if (unsigned SFO = GetSelectFoldableOperands(FVI)) {
9102 unsigned OpToFold = 0;
        if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
          OpToFold = 1;
        } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
          OpToFold = 2;
        }

        if (OpToFold) {
9110 Constant *C = GetSelectFoldableConstant(FVI, Context);
9111 Value *OOp = FVI->getOperand(2-OpToFold);
          // Avoid creating a select between two constants unless it's
          // selecting between 0 and 1 (see isSelect01).
9114 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9115 Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp);
9116 InsertNewInstBefore(NewSel, SI);
9117 NewSel->takeName(FVI);
9118 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
9119 return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel);
9120 llvm_unreachable("Unknown instruction!!");
9130 /// visitSelectInstWithICmp - Visit a SelectInst that has an
9131 /// ICmpInst as its first operand.
9133 Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
9135 bool Changed = false;
9136 ICmpInst::Predicate Pred = ICI->getPredicate();
9137 Value *CmpLHS = ICI->getOperand(0);
9138 Value *CmpRHS = ICI->getOperand(1);
9139 Value *TrueVal = SI.getTrueValue();
9140 Value *FalseVal = SI.getFalseValue();
9142 // Check cases where the comparison is with a constant that
9143 // can be adjusted to fit the min/max idiom. We may edit ICI in
9144 // place here, so make sure the select is the only user.
9145 if (ICI->hasOneUse())
9146 if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
9149 case ICmpInst::ICMP_ULT:
9150 case ICmpInst::ICMP_SLT: {
9151 // X < MIN ? T : F --> F
9152 if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
9153 return ReplaceInstUsesWith(SI, FalseVal);
9154 // X < C ? X : C-1 --> X > C-1 ? C-1 : X
9155 Constant *AdjustedRHS = SubOne(CI);
9156 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9157 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9158 Pred = ICmpInst::getSwappedPredicate(Pred);
9159 CmpRHS = AdjustedRHS;
9160 std::swap(FalseVal, TrueVal);
9161 ICI->setPredicate(Pred);
9162 ICI->setOperand(1, CmpRHS);
9163 SI.setOperand(1, TrueVal);
9164 SI.setOperand(2, FalseVal);
9169 case ICmpInst::ICMP_UGT:
9170 case ICmpInst::ICMP_SGT: {
9171 // X > MAX ? T : F --> F
9172 if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
9173 return ReplaceInstUsesWith(SI, FalseVal);
9174 // X > C ? X : C+1 --> X < C+1 ? C+1 : X
9175 Constant *AdjustedRHS = AddOne(CI);
9176 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9177 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9178 Pred = ICmpInst::getSwappedPredicate(Pred);
9179 CmpRHS = AdjustedRHS;
9180 std::swap(FalseVal, TrueVal);
9181 ICI->setPredicate(Pred);
9182 ICI->setOperand(1, CmpRHS);
9183 SI.setOperand(1, TrueVal);
9184 SI.setOperand(2, FalseVal);
  // (x <s 0) ? -1 : 0  -> ashr x, 31            (all ones if the sign bit is set)
  // (x >s -1) ? -1 : 0 -> xor (ashr x, 31), -1  (all ones if the sign bit is clear)
9193 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
9194 if (match(TrueVal, m_ConstantInt<-1>()) &&
9195 match(FalseVal, m_ConstantInt<0>()))
9196 Pred = ICI->getPredicate();
9197 else if (match(TrueVal, m_ConstantInt<0>()) &&
9198 match(FalseVal, m_ConstantInt<-1>()))
9199 Pred = CmpInst::getInversePredicate(ICI->getPredicate());
9201 if (Pred != CmpInst::BAD_ICMP_PREDICATE) {
    // If we are selecting between all-ones and zero based only on the sign of
    // a value, we can produce the result directly with an arithmetic shift of
    // the sign bit (inverted for the >s -1 case) and avoid the comparison.
9205 const APInt &Op1CV = CI->getValue();
9207 // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
9208 // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
9209 if ((Pred == ICmpInst::ICMP_SLT && Op1CV == 0) ||
9210 (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
9211 Value *In = ICI->getOperand(0);
9212 Value *Sh = ConstantInt::get(In->getType(),
9213 In->getType()->getScalarSizeInBits()-1);
9214 In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh,
9215 In->getName()+".lobit"),
9217 if (In->getType() != SI.getType())
9218 In = CastInst::CreateIntegerCast(In, SI.getType(),
9219 true/*SExt*/, "tmp", ICI);
9221 if (Pred == ICmpInst::ICMP_SGT)
9222 In = InsertNewInstBefore(BinaryOperator::CreateNot(In,
9223 In->getName()+".not"), *ICI);
9225 return ReplaceInstUsesWith(SI, In);
9230 if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
9231 // Transform (X == Y) ? X : Y -> Y
9232 if (Pred == ICmpInst::ICMP_EQ)
9233 return ReplaceInstUsesWith(SI, FalseVal);
9234 // Transform (X != Y) ? X : Y -> X
9235 if (Pred == ICmpInst::ICMP_NE)
9236 return ReplaceInstUsesWith(SI, TrueVal);
9237 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9239 } else if (CmpLHS == FalseVal && CmpRHS == TrueVal) {
9240 // Transform (X == Y) ? Y : X -> X
9241 if (Pred == ICmpInst::ICMP_EQ)
9242 return ReplaceInstUsesWith(SI, FalseVal);
9243 // Transform (X != Y) ? Y : X -> Y
9244 if (Pred == ICmpInst::ICMP_NE)
9245 return ReplaceInstUsesWith(SI, TrueVal);
9246 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9249 /// NOTE: if we wanted to, this is where to detect integer ABS
9251 return Changed ? &SI : 0;
9254 /// isDefinedInBB - Return true if the value is an instruction defined in the
/// specified basic block.
9256 static bool isDefinedInBB(const Value *V, const BasicBlock *BB) {
9257 const Instruction *I = dyn_cast<Instruction>(V);
9258 return I != 0 && I->getParent() == BB;
9262 Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
9263 Value *CondVal = SI.getCondition();
9264 Value *TrueVal = SI.getTrueValue();
9265 Value *FalseVal = SI.getFalseValue();
9267 // select true, X, Y -> X
9268 // select false, X, Y -> Y
9269 if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
9270 return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);
9272 // select C, X, X -> X
9273 if (TrueVal == FalseVal)
9274 return ReplaceInstUsesWith(SI, TrueVal);
9276 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
9277 return ReplaceInstUsesWith(SI, FalseVal);
9278 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
9279 return ReplaceInstUsesWith(SI, TrueVal);
9280 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
9281 if (isa<Constant>(TrueVal))
9282 return ReplaceInstUsesWith(SI, TrueVal);
9284 return ReplaceInstUsesWith(SI, FalseVal);
9287 if (SI.getType() == Type::getInt1Ty(*Context)) {
9288 if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
9289 if (C->getZExtValue()) {
9290 // Change: A = select B, true, C --> A = or B, C
9291 return BinaryOperator::CreateOr(CondVal, FalseVal);
9293 // Change: A = select B, false, C --> A = and !B, C
      Value *NotCond =
        InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9296 "not."+CondVal->getName()), SI);
9297 return BinaryOperator::CreateAnd(NotCond, FalseVal);
9299 } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
9300 if (C->getZExtValue() == false) {
9301 // Change: A = select B, C, false --> A = and B, C
9302 return BinaryOperator::CreateAnd(CondVal, TrueVal);
9304 // Change: A = select B, C, true --> A = or !B, C
      Value *NotCond =
        InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9307 "not."+CondVal->getName()), SI);
9308 return BinaryOperator::CreateOr(NotCond, TrueVal);
9312 // select a, b, a -> a&b
9313 // select a, a, b -> a|b
9314 if (CondVal == TrueVal)
9315 return BinaryOperator::CreateOr(CondVal, FalseVal);
9316 else if (CondVal == FalseVal)
9317 return BinaryOperator::CreateAnd(CondVal, TrueVal);
9320 // Selecting between two integer constants?
9321 if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
9322 if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
9323 // select C, 1, 0 -> zext C to int
9324 if (FalseValC->isZero() && TrueValC->getValue() == 1) {
9325 return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
9326 } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
9327 // select C, 0, 1 -> zext !C to int
        Value *NotCond =
          InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9330 "not."+CondVal->getName()), SI);
9331 return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
9334 if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
9335 // If one of the constants is zero (we know they can't both be) and we
9336 // have an icmp instruction with zero, and we have an 'and' with the
9337 // non-constant value, eliminate this whole mess. This corresponds to
9338 // cases like this: ((X & 27) ? 27 : 0)
9339 if (TrueValC->isZero() || FalseValC->isZero())
9340 if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
9341 cast<Constant>(IC->getOperand(1))->isNullValue())
9342 if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
9343 if (ICA->getOpcode() == Instruction::And &&
9344 isa<ConstantInt>(ICA->getOperand(1)) &&
9345 (ICA->getOperand(1) == TrueValC ||
9346 ICA->getOperand(1) == FalseValC) &&
9347 isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
9348 // Okay, now we know that everything is set up, we just don't
9349 // know whether we have a icmp_ne or icmp_eq and whether the
9350 // true or false val is the zero.
9351 bool ShouldNotVal = !TrueValC->isZero();
9352 ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
9355 V = InsertNewInstBefore(BinaryOperator::Create(
9356 Instruction::Xor, V, ICA->getOperand(1)), SI);
9357 return ReplaceInstUsesWith(SI, V);
9362 // See if we are selecting two values based on a comparison of the two values.
9363 if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
9364 if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
9365 // Transform (X == Y) ? X : Y -> Y
9366 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
9367 // This is not safe in general for floating point:
9368 // consider X== -0, Y== +0.
9369 // It becomes safe if either operand is a nonzero constant.
9370 ConstantFP *CFPt, *CFPf;
9371 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
9372 !CFPt->getValueAPF().isZero()) ||
9373 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
9374 !CFPf->getValueAPF().isZero()))
9375 return ReplaceInstUsesWith(SI, FalseVal);
9377 // Transform (X != Y) ? X : Y -> X
9378 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
9379 return ReplaceInstUsesWith(SI, TrueVal);
9380 // NOTE: if we wanted to, this is where to detect MIN/MAX
9382 } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
9383 // Transform (X == Y) ? Y : X -> X
9384 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
9385 // This is not safe in general for floating point:
9386 // consider X== -0, Y== +0.
9387 // It becomes safe if either operand is a nonzero constant.
9388 ConstantFP *CFPt, *CFPf;
9389 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
9390 !CFPt->getValueAPF().isZero()) ||
9391 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
9392 !CFPf->getValueAPF().isZero()))
9393 return ReplaceInstUsesWith(SI, FalseVal);
9395 // Transform (X != Y) ? Y : X -> Y
9396 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
9397 return ReplaceInstUsesWith(SI, TrueVal);
9398 // NOTE: if we wanted to, this is where to detect MIN/MAX
9400 // NOTE: if we wanted to, this is where to detect ABS
9403 // See if we are selecting two values based on a comparison of the two values.
9404 if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
9405 if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
9408 if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
9409 if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
9410 if (TI->hasOneUse() && FI->hasOneUse()) {
9411 Instruction *AddOp = 0, *SubOp = 0;
9413 // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
9414 if (TI->getOpcode() == FI->getOpcode())
9415 if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
9418 // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is
9419 // even legal for FP.
9420 if ((TI->getOpcode() == Instruction::Sub &&
9421 FI->getOpcode() == Instruction::Add) ||
9422 (TI->getOpcode() == Instruction::FSub &&
9423 FI->getOpcode() == Instruction::FAdd)) {
9424 AddOp = FI; SubOp = TI;
9425 } else if ((FI->getOpcode() == Instruction::Sub &&
9426 TI->getOpcode() == Instruction::Add) ||
9427 (FI->getOpcode() == Instruction::FSub &&
9428 TI->getOpcode() == Instruction::FAdd)) {
9429 AddOp = TI; SubOp = FI;
9433 Value *OtherAddOp = 0;
9434 if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
9435 OtherAddOp = AddOp->getOperand(1);
9436 } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
9437 OtherAddOp = AddOp->getOperand(0);
9441 // So at this point we know we have (Y -> OtherAddOp):
9442 // select C, (add X, Y), (sub X, Z)
9443 Value *NegVal; // Compute -Z
9444 if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
9445 NegVal = ConstantExpr::getNeg(C);
9447 NegVal = InsertNewInstBefore(
9448 BinaryOperator::CreateNeg(SubOp->getOperand(1),
9452 Value *NewTrueOp = OtherAddOp;
9453 Value *NewFalseOp = NegVal;
9455 std::swap(NewTrueOp, NewFalseOp);
9456 Instruction *NewSel =
9457 SelectInst::Create(CondVal, NewTrueOp,
9458 NewFalseOp, SI.getName() + ".p");
9460 NewSel = InsertNewInstBefore(NewSel, SI);
9461 return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
9466 // See if we can fold the select into one of our operands.
9467 if (SI.getType()->isInteger()) {
9468 Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal);
9473 // See if we can fold the select into a phi node. The true/false values have
9474 // to be live in the predecessor blocks. If they are instructions in SI's
9475 // block, we can't map to the predecessor.
9476 if (isa<PHINode>(SI.getCondition()) &&
9477 (!isDefinedInBB(SI.getTrueValue(), SI.getParent()) ||
9478 isa<PHINode>(SI.getTrueValue())) &&
9479 (!isDefinedInBB(SI.getFalseValue(), SI.getParent()) ||
9480 isa<PHINode>(SI.getFalseValue())))
9481 if (Instruction *NV = FoldOpIntoPhi(SI))
9484 if (BinaryOperator::isNot(CondVal)) {
9485 SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
9486 SI.setOperand(1, FalseVal);
9487 SI.setOperand(2, TrueVal);
9494 /// EnforceKnownAlignment - If the specified pointer points to an object that
9495 /// we control, modify the object's alignment to PrefAlign. This isn't
9496 /// often possible though. If alignment is important, a more reliable approach
9497 /// is to simply align all global variables and allocation instructions to
9498 /// their preferred alignment from the beginning.
9500 static unsigned EnforceKnownAlignment(Value *V,
9501 unsigned Align, unsigned PrefAlign) {
9503 User *U = dyn_cast<User>(V);
9504 if (!U) return Align;
9506 switch (Operator::getOpcode(U)) {
9508 case Instruction::BitCast:
9509 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
9510 case Instruction::GetElementPtr: {
9511 // If all indexes are zero, it is just the alignment of the base pointer.
9512 bool AllZeroOperands = true;
9513 for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
9514 if (!isa<Constant>(*i) ||
9515 !cast<Constant>(*i)->isNullValue()) {
9516 AllZeroOperands = false;
9520 if (AllZeroOperands) {
9521 // Treat this like a bitcast.
9522 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
9528 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
9529 // If there is a large requested alignment and we can, bump up the alignment of the global.
9531 if (!GV->isDeclaration()) {
9532 if (GV->getAlignment() >= PrefAlign)
9533 Align = GV->getAlignment();
9535 GV->setAlignment(PrefAlign);
9539 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
9540 // If there is a requested alignment and if this is an alloca, round up.
9541 if (AI->getAlignment() >= PrefAlign)
9542 Align = AI->getAlignment();
9544 AI->setAlignment(PrefAlign);
9552 /// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
9553 /// we can determine, return it, otherwise return 0. If PrefAlign is specified,
9554 /// and it is more than the alignment of the ultimate object, see if we can
9555 /// increase the alignment of the ultimate object, making this check succeed.
9556 unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
9557 unsigned PrefAlign) {
9558 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
9559 sizeof(PrefAlign) * CHAR_BIT;
9560 APInt Mask = APInt::getAllOnesValue(BitWidth);
9561 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
9562 ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
9563 unsigned TrailZ = KnownZero.countTrailingOnes();
9564 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
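// Worked example (illustrative, not in the original comments): if
// ComputeMaskedBits proves the low three bits of V are zero, TrailZ is 3 and
// Align becomes 1u << 3 == 8, i.e. V is known to be at least 8-byte aligned.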
9566 if (PrefAlign > Align)
9567 Align = EnforceKnownAlignment(V, Align, PrefAlign);
9569 // We don't need to make any adjustment.
9573 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
9574 unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
9575 unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
9576 unsigned MinAlign = std::min(DstAlign, SrcAlign);
9577 unsigned CopyAlign = MI->getAlignment();
9579 if (CopyAlign < MinAlign) {
9580 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
9585 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a load/store.
9587 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
9588 if (MemOpLength == 0) return 0;
9590 // Source and destination pointer types are always "i8*" for intrinsic. See
9591 // if the size is something we can handle with a single primitive load/store.
9592 // A single load+store correctly handles overlapping memory in the memmove case.
9594 unsigned Size = MemOpLength->getZExtValue();
9595 if (Size == 0) return MI; // Delete this mem transfer.
9597 if (Size > 8 || (Size&(Size-1)))
9598 return 0; // If not 1/2/4/8 bytes, exit.
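// Illustrative sketch (assumed IR, not from the source): a 4-byte copy such as
//   call void @llvm.memcpy.i32(i8* %d, i8* %s, i32 4, i32 4)
// is rewritten below into, in effect,
//   %sp = bitcast i8* %s to i32*
//   %dp = bitcast i8* %d to i32*
//   %v  = load i32* %sp, align 4
//   store i32 %v, i32* %dp, align 4
// and the intrinsic's length is then zeroed so it gets deleted later.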
9600 // Use an integer load+store unless we can find something better.
9602 PointerType::getUnqual(IntegerType::get(*Context, Size<<3));
9604 // Memcpy forces the use of i8* for the source and destination. That means
9605 // that if you're using memcpy to move one double around, you'll get a cast
9606 // from double* to i8*. We'd much rather use a double load+store than an
9607 // i64 load+store here, because this improves the odds that the source or
9608 // dest address will be promotable. See if we can find a better type than the
9609 // integer datatype.
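// Illustrative note (assumption): if the source operand is really a bitcast of
// a double*, an 8-byte copy is better emitted as
//   %v = load double* %srcd, align 8
//   store double %v, double* %dstd, align 8
// than as an i64 load/store, keeping the value in a promotable form.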
9610 if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
9611 const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
9612 if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
9613 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
9614 // down through these levels if so.
9615 while (!SrcETy->isSingleValueType()) {
9616 if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
9617 if (STy->getNumElements() == 1)
9618 SrcETy = STy->getElementType(0);
9621 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
9622 if (ATy->getNumElements() == 1)
9623 SrcETy = ATy->getElementType();
9630 if (SrcETy->isSingleValueType())
9631 NewPtrTy = PointerType::getUnqual(SrcETy);
9636 // If the memcpy/memmove provides better alignment info than we can infer, use it.
9638 SrcAlign = std::max(SrcAlign, CopyAlign);
9639 DstAlign = std::max(DstAlign, CopyAlign);
9641 Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
9642 Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
9643 Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
9644 InsertNewInstBefore(L, *MI);
9645 InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
9647 // Set the size of the copy to 0, it will be deleted on the next iteration.
9648 MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
9652 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
9653 unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
9654 if (MI->getAlignment() < Alignment) {
9655 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
9660 // Extract the length and alignment and fill if they are constant.
9661 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
9662 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
9663 if (!LenC || !FillC || FillC->getType() != Type::getInt8Ty(*Context))
9665 uint64_t Len = LenC->getZExtValue();
9666 Alignment = MI->getAlignment();
9668 // If the length is zero, this is a no-op
9669 if (Len == 0) return MI; // memset(d,c,0,a) -> noop
9671 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
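// Illustrative sketch (assumed IR, not from the source): memset(%p, 0xAB, 4, 4)
// becomes
//   %ip = bitcast i8* %p to i32*
//   store i32 0xABABABAB, i32* %ip, align 4
// i.e. the byte fill value is replicated into every byte of the wider store,
// which is what the 0x0101010101010101 multiply below computes.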
9672 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
9673 const Type *ITy = IntegerType::get(*Context, Len*8); // n=1 -> i8.
9675 Value *Dest = MI->getDest();
9676 Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
9678 // For memset, alignment 0 is equivalent to alignment 1, but for store it is not.
9679 if (Alignment == 0) Alignment = 1;
9681 // Extract the fill value and store.
9682 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
9683 InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
9684 Dest, false, Alignment), *MI);
9686 // Set the size of the copy to 0, it will be deleted on the next iteration.
9687 MI->setLength(Constant::getNullValue(LenC->getType()));
9695 /// visitCallInst - CallInst simplification. This mostly only handles folding
9696 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
9697 /// the heavy lifting.
9699 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
9700 // If the caller function is nounwind, mark the call as nounwind, even if the callee isn't.
9702 if (CI.getParent()->getParent()->doesNotThrow() &&
9703 !CI.doesNotThrow()) {
9704 CI.setDoesNotThrow();
9708 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
9709 if (!II) return visitCallSite(&CI);
9711 // Intrinsics cannot occur in an invoke, so handle them here instead of in visitCallSite.
9713 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
9714 bool Changed = false;
9716 // memmove/cpy/set of zero bytes is a noop.
9717 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
9718 if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
9720 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
9721 if (CI->getZExtValue() == 1) {
9722 // Replace the instruction with just byte operations. We would
9723 // transform other cases to loads/stores, but we don't know if
9724 // alignment is sufficient.
9728 // If we have a memmove and the source operation is a constant global,
9729 // then the source and dest pointers can't alias, so we can change this
9730 // into a call to memcpy.
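// Illustrative note (assumption): a constant global can never be written, so
// it cannot be the destination; e.g. memmove(%buf, @.str, %n) with constant
// @.str is rewritten as memcpy(%buf, @.str, %n) with the same operands.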
9731 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
9732 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
9733 if (GVSrc->isConstant()) {
9734 Module *M = CI.getParent()->getParent()->getParent();
9735 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
9737 Tys[0] = CI.getOperand(3)->getType();
9739 Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
9743 // memmove(x,x,size) -> noop.
9744 if (MMI->getSource() == MMI->getDest())
9745 return EraseInstFromFunction(CI);
9748 // If we can determine a pointer alignment that is bigger than currently
9749 // set, update the alignment.
9750 if (isa<MemTransferInst>(MI)) {
9751 if (Instruction *I = SimplifyMemTransfer(MI))
9753 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
9754 if (Instruction *I = SimplifyMemSet(MSI))
9758 if (Changed) return II;
9761 switch (II->getIntrinsicID()) {
9763 case Intrinsic::bswap:
9764 // bswap(bswap(x)) -> x
9765 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
9766 if (Operand->getIntrinsicID() == Intrinsic::bswap)
9767 return ReplaceInstUsesWith(CI, Operand->getOperand(1));
9769 case Intrinsic::ppc_altivec_lvx:
9770 case Intrinsic::ppc_altivec_lvxl:
9771 case Intrinsic::x86_sse_loadu_ps:
9772 case Intrinsic::x86_sse2_loadu_pd:
9773 case Intrinsic::x86_sse2_loadu_dq:
9774 // Turn PPC lvx -> load if the pointer is known aligned.
9775 // Turn X86 loadups -> load if the pointer is known aligned.
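// Illustrative sketch (assumed IR, not from the source):
//   %v = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)   ; %p known 16-byte aligned
// becomes
//   %vp = bitcast i8* %p to <4 x float>*
//   %v  = load <4 x float>* %vp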
9776 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
9777 Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
9778 PointerType::getUnqual(II->getType()));
9779 return new LoadInst(Ptr);
9782 case Intrinsic::ppc_altivec_stvx:
9783 case Intrinsic::ppc_altivec_stvxl:
9784 // Turn stvx -> store if the pointer is known aligned.
9785 if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
9786 const Type *OpPtrTy =
9787 PointerType::getUnqual(II->getOperand(1)->getType());
9788 Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
9789 return new StoreInst(II->getOperand(1), Ptr);
9792 case Intrinsic::x86_sse_storeu_ps:
9793 case Intrinsic::x86_sse2_storeu_pd:
9794 case Intrinsic::x86_sse2_storeu_dq:
9795 // Turn X86 storeu -> store if the pointer is known aligned.
9796 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
9797 const Type *OpPtrTy =
9798 PointerType::getUnqual(II->getOperand(2)->getType());
9799 Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
9800 return new StoreInst(II->getOperand(2), Ptr);
9804 case Intrinsic::x86_sse_cvttss2si: {
9805 // These intrinsics only demand the 0th element of their input vector. If
9806 // we can simplify the input based on that, do so now.
9808 cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
9809 APInt DemandedElts(VWidth, 1);
9810 APInt UndefElts(VWidth, 0);
9811 if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
9813 II->setOperand(1, V);
9819 case Intrinsic::ppc_altivec_vperm:
9820 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
9821 if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
9822 assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
9824 // Check that all of the elements are integer constants or undefs.
9825 bool AllEltsOk = true;
9826 for (unsigned i = 0; i != 16; ++i) {
9827 if (!isa<ConstantInt>(Mask->getOperand(i)) &&
9828 !isa<UndefValue>(Mask->getOperand(i))) {
9835 // Cast the input vectors to byte vectors.
9836 Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
9837 Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
9838 Value *Result = UndefValue::get(Op0->getType());
9840 // Only extract each element once.
9841 Value *ExtractedElts[32];
9842 memset(ExtractedElts, 0, sizeof(ExtractedElts));
9844 for (unsigned i = 0; i != 16; ++i) {
9845 if (isa<UndefValue>(Mask->getOperand(i)))
9847 unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
9848 Idx &= 31; // Match the hardware behavior.
9850 if (ExtractedElts[Idx] == 0) {
9851 ExtractedElts[Idx] =
9852 Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
9853 ConstantInt::get(Type::getInt32Ty(*Context), Idx&15, false),
9857 // Insert this value into the result vector.
9858 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
9859 ConstantInt::get(Type::getInt32Ty(*Context), i, false),
9862 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
9867 case Intrinsic::stackrestore: {
9868 // If the save is right next to the restore, remove the restore. This can
9869 // happen when variable allocas are DCE'd.
9870 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
9871 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
9872 BasicBlock::iterator BI = SS;
9874 return EraseInstFromFunction(CI);
9878 // Scan down this block to see if there is another stack restore in the
9879 // same block without an intervening call/alloca.
9880 BasicBlock::iterator BI = II;
9881 TerminatorInst *TI = II->getParent()->getTerminator();
9882 bool CannotRemove = false;
9883 for (++BI; &*BI != TI; ++BI) {
9884 if (isa<AllocaInst>(BI) || isMalloc(BI)) {
9885 CannotRemove = true;
9888 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
9889 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
9890 // If there is a stackrestore below this one, remove this one.
9891 if (II->getIntrinsicID() == Intrinsic::stackrestore)
9892 return EraseInstFromFunction(CI);
9893 // Otherwise, ignore the intrinsic.
9895 // If we found a non-intrinsic call, we can't remove the stack restore.
9897 CannotRemove = true;
9903 // If the stack restore is in a return/unwind block and if there are no
9904 // allocas or calls between the restore and the return, nuke the restore.
9905 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
9906 return EraseInstFromFunction(CI);
9911 return visitCallSite(II);
9914 // InvokeInst simplification
9916 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
9917 return visitCallSite(&II);
9920 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
9921 /// passed through the varargs area, we can eliminate the use of the cast.
9922 static bool isSafeToEliminateVarargsCast(const CallSite CS,
9923 const CastInst * const CI,
9924 const TargetData * const TD,
9926 if (!CI->isLosslessCast())
9929 // The size of ByVal arguments is derived from the type, so we
9930 // can't change to a type with a different size. If the size were
9931 // passed explicitly we could avoid this check.
9932 if (!CS.paramHasAttr(ix, Attribute::ByVal))
9936 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
9937 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
9938 if (!SrcTy->isSized() || !DstTy->isSized())
9940 if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
9945 // visitCallSite - Improvements for call and invoke instructions.
9947 Instruction *InstCombiner::visitCallSite(CallSite CS) {
9948 bool Changed = false;
9950 // If the callee is a constexpr cast of a function, attempt to move the cast
9951 // to the arguments of the call/invoke.
9952 if (transformConstExprCastCall(CS)) return 0;
9954 Value *Callee = CS.getCalledValue();
9956 if (Function *CalleeF = dyn_cast<Function>(Callee))
9957 if (CalleeF->getCallingConv() != CS.getCallingConv()) {
9958 Instruction *OldCall = CS.getInstruction();
9959 // If the call and callee calling conventions don't match, this call must
9960 // be unreachable, as the call is undefined.
9961 new StoreInst(ConstantInt::getTrue(*Context),
9962 UndefValue::get(Type::getInt1PtrTy(*Context)),
9964 if (!OldCall->use_empty())
9965 OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
9966 if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
9967 return EraseInstFromFunction(*OldCall);
9971 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
9972 // This instruction is not reachable, just remove it. We insert a store to
9973 // undef so that we know that this code is not reachable, despite the fact
9974 // that we can't modify the CFG here.
9975 new StoreInst(ConstantInt::getTrue(*Context),
9976 UndefValue::get(Type::getInt1PtrTy(*Context)),
9977 CS.getInstruction());
9979 if (!CS.getInstruction()->use_empty())
9980 CS.getInstruction()->
9981 replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));
9983 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
9984 // Don't break the CFG, insert a dummy cond branch.
9985 BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
9986 ConstantInt::getTrue(*Context), II);
9988 return EraseInstFromFunction(*CS.getInstruction());
9991 if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
9992 if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
9993 if (In->getIntrinsicID() == Intrinsic::init_trampoline)
9994 return transformCallThroughTrampoline(CS);
9996 const PointerType *PTy = cast<PointerType>(Callee->getType());
9997 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
9998 if (FTy->isVarArg()) {
9999 int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
10000 // See if we can optimize any arguments passed through the varargs area of the call.
10002 for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
10003 E = CS.arg_end(); I != E; ++I, ++ix) {
10004 CastInst *CI = dyn_cast<CastInst>(*I);
10005 if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
10006 *I = CI->getOperand(0);
10012 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
10013 // Inline asm calls cannot throw - mark them 'nounwind'.
10014 CS.setDoesNotThrow();
10018 return Changed ? CS.getInstruction() : 0;
10021 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
10022 // attempt to move the cast to the arguments of the call/invoke.
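// Illustrative sketch (assumed IR, not from the source): a call through a
// bitcast callee such as
//   %r = call i32* bitcast (i8* (i8*)* @f to i32* (i32*)*)(i32* %p)
// is turned into a direct call to @f with the argument and result casted:
//   %a = bitcast i32* %p to i8*
//   %c = call i8* @f(i8* %a)
//   %r = bitcast i8* %c to i32*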
10024 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
10025 if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
10026 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
10027 if (CE->getOpcode() != Instruction::BitCast ||
10028 !isa<Function>(CE->getOperand(0)))
10030 Function *Callee = cast<Function>(CE->getOperand(0));
10031 Instruction *Caller = CS.getInstruction();
10032 const AttrListPtr &CallerPAL = CS.getAttributes();
10034 // Okay, this is a cast from a function to a different type. Unless doing so
10035 // would cause a type conversion of one of our arguments, change this call to
10036 // be a direct call with arguments casted to the appropriate types.
10038 const FunctionType *FT = Callee->getFunctionType();
10039 const Type *OldRetTy = Caller->getType();
10040 const Type *NewRetTy = FT->getReturnType();
10042 if (isa<StructType>(NewRetTy))
10043 return false; // TODO: Handle multiple return values.
10045 // Check to see if we are changing the return type...
10046 if (OldRetTy != NewRetTy) {
10047 if (Callee->isDeclaration() &&
10048 // Conversion is ok if changing from one pointer type to another or from
10049 // a pointer to an integer of the same size.
10050 !((isa<PointerType>(OldRetTy) || !TD ||
10051 OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
10052 (isa<PointerType>(NewRetTy) || !TD ||
10053 NewRetTy == TD->getIntPtrType(Caller->getContext()))))
10054 return false; // Cannot transform this return value.
10056 if (!Caller->use_empty() &&
10057 // void -> non-void is handled specially
10058 NewRetTy != Type::getVoidTy(*Context) && !CastInst::isCastable(NewRetTy, OldRetTy))
10059 return false; // Cannot transform this return value.
10061 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
10062 Attributes RAttrs = CallerPAL.getRetAttributes();
10063 if (RAttrs & Attribute::typeIncompatible(NewRetTy))
10064 return false; // Attribute not compatible with transformed value.
10067 // If the callsite is an invoke instruction, and the return value is used by
10068 // a PHI node in a successor, we cannot change the return type of the call
10069 // because there is no place to put the cast instruction (without breaking
10070 // the critical edge). Bail out in this case.
10071 if (!Caller->use_empty())
10072 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
10073 for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
10075 if (PHINode *PN = dyn_cast<PHINode>(*UI))
10076 if (PN->getParent() == II->getNormalDest() ||
10077 PN->getParent() == II->getUnwindDest())
10081 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
10082 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
10084 CallSite::arg_iterator AI = CS.arg_begin();
10085 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
10086 const Type *ParamTy = FT->getParamType(i);
10087 const Type *ActTy = (*AI)->getType();
10089 if (!CastInst::isCastable(ActTy, ParamTy))
10090 return false; // Cannot transform this parameter value.
10092 if (CallerPAL.getParamAttributes(i + 1)
10093 & Attribute::typeIncompatible(ParamTy))
10094 return false; // Attribute not compatible with transformed value.
10096 // Converting from one pointer type to another or between a pointer and an
10097 // integer of the same size is safe even if we do not have a body.
10098 bool isConvertible = ActTy == ParamTy ||
10099 (TD && ((isa<PointerType>(ParamTy) ||
10100 ParamTy == TD->getIntPtrType(Caller->getContext())) &&
10101 (isa<PointerType>(ActTy) ||
10102 ActTy == TD->getIntPtrType(Caller->getContext()))));
10103 if (Callee->isDeclaration() && !isConvertible) return false;
10106 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
10107 Callee->isDeclaration())
10108 return false; // Do not delete arguments unless we have a function body.
10110 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
10111 !CallerPAL.isEmpty())
10112 // In this case we have more arguments than the new function type, but we
10113 // won't be dropping them. Check that these extra arguments have attributes
10114 // that are compatible with being a vararg call argument.
10115 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
10116 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
10118 Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
10119 if (PAttrs & Attribute::VarArgsIncompatible)
10123 // Okay, we decided that this is a safe thing to do: go ahead and start
10124 // inserting cast instructions as necessary...
10125 std::vector<Value*> Args;
10126 Args.reserve(NumActualArgs);
10127 SmallVector<AttributeWithIndex, 8> attrVec;
10128 attrVec.reserve(NumCommonArgs);
10130 // Get any return attributes.
10131 Attributes RAttrs = CallerPAL.getRetAttributes();
10133 // If the return value is not being used, the type may not be compatible
10134 // with the existing attributes. Wipe out any problematic attributes.
10135 RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
10137 // Add the new return attributes.
10139 attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
10141 AI = CS.arg_begin();
10142 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
10143 const Type *ParamTy = FT->getParamType(i);
10144 if ((*AI)->getType() == ParamTy) {
10145 Args.push_back(*AI);
10147 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
10148 false, ParamTy, false);
10149 Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
10152 // Add any parameter attributes.
10153 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
10154 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
10157 // If the function takes more arguments than the call was taking, add them now.
10159 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
10160 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
10162 // If we are removing arguments to the function, emit an obnoxious warning.
10163 if (FT->getNumParams() < NumActualArgs) {
10164 if (!FT->isVarArg()) {
10165 errs() << "WARNING: While resolving call to function '"
10166 << Callee->getName() << "' arguments were dropped!\n";
10168 // Add all of the arguments in their promoted form to the arg list.
10169 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
10170 const Type *PTy = getPromotedType((*AI)->getType());
10171 if (PTy != (*AI)->getType()) {
10172 // Must promote to pass through va_arg area!
10173 Instruction::CastOps opcode =
10174 CastInst::getCastOpcode(*AI, false, PTy, false);
10175 Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
10177 Args.push_back(*AI);
10180 // Add any parameter attributes.
10181 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
10182 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
10187 if (Attributes FnAttrs = CallerPAL.getFnAttributes())
10188 attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
10190 if (NewRetTy == Type::getVoidTy(*Context))
10191 Caller->setName(""); // Void type should not have a name.
10193 const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
10197 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10198 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
10199 Args.begin(), Args.end(),
10200 Caller->getName(), Caller);
10201 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
10202 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
10204 NC = CallInst::Create(Callee, Args.begin(), Args.end(),
10205 Caller->getName(), Caller);
10206 CallInst *CI = cast<CallInst>(Caller);
10207 if (CI->isTailCall())
10208 cast<CallInst>(NC)->setTailCall();
10209 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
10210 cast<CallInst>(NC)->setAttributes(NewCallerPAL);
10213 // Insert a cast of the return type as necessary.
10215 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
10216 if (NV->getType() != Type::getVoidTy(*Context)) {
10217 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
10219 NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
10221 // If this is an invoke instruction, we should insert it after the first
10222 // non-phi instruction in the normal successor block.
10223 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10224 BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
10225 InsertNewInstBefore(NC, *I);
10227 // Otherwise, it's a call; just insert the cast right after the call instruction.
10228 InsertNewInstBefore(NC, *Caller);
10230 Worklist.AddUsersToWorkList(*Caller);
10232 NV = UndefValue::get(Caller->getType());
10237 if (!Caller->use_empty())
10238 Caller->replaceAllUsesWith(NV);
10240 EraseInstFromFunction(*Caller);
10244 // transformCallThroughTrampoline - Turn a call to a function created by the
10245 // init_trampoline intrinsic into a direct call to the underlying function.
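// Illustrative note (assumption, not from the source): if the callee bitcast
// wraps a pointer produced by llvm.init.trampoline for a function @f whose
// 'nest' parameter takes %nval, then a call such as call i32 %fp(i32 7) is
// rewritten as a direct call with the chain argument spliced in, roughly
//   call i32 @f(i8* nest %nval, i32 7)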
10247 Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
10248 Value *Callee = CS.getCalledValue();
10249 const PointerType *PTy = cast<PointerType>(Callee->getType());
10250 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
10251 const AttrListPtr &Attrs = CS.getAttributes();
10253 // If the call already has the 'nest' attribute somewhere then give up -
10254 // otherwise 'nest' would occur twice after splicing in the chain.
10255 if (Attrs.hasAttrSomewhere(Attribute::Nest))
10258 IntrinsicInst *Tramp =
10259 cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
10261 Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
10262 const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
10263 const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
10265 const AttrListPtr &NestAttrs = NestF->getAttributes();
10266 if (!NestAttrs.isEmpty()) {
10267 unsigned NestIdx = 1;
10268 const Type *NestTy = 0;
10269 Attributes NestAttr = Attribute::None;
10271 // Look for a parameter marked with the 'nest' attribute.
10272 for (FunctionType::param_iterator I = NestFTy->param_begin(),
10273 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
10274 if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
10275 // Record the parameter type and any other attributes.
10277 NestAttr = NestAttrs.getParamAttributes(NestIdx);
10282 Instruction *Caller = CS.getInstruction();
10283 std::vector<Value*> NewArgs;
10284 NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
10286 SmallVector<AttributeWithIndex, 8> NewAttrs;
10287 NewAttrs.reserve(Attrs.getNumSlots() + 1);
10289 // Insert the nest argument into the call argument list, which may
10290 // mean appending it. Likewise for attributes.
10292 // Add any result attributes.
10293 if (Attributes Attr = Attrs.getRetAttributes())
10294 NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
10298 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
10300 if (Idx == NestIdx) {
10301 // Add the chain argument and attributes.
10302 Value *NestVal = Tramp->getOperand(3);
10303 if (NestVal->getType() != NestTy)
10304 NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
10305 NewArgs.push_back(NestVal);
10306 NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
10312 // Add the original argument and attributes.
10313 NewArgs.push_back(*I);
10314 if (Attributes Attr = Attrs.getParamAttributes(Idx))
10316 (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
10322 // Add any function attributes.
10323 if (Attributes Attr = Attrs.getFnAttributes())
10324 NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
10326 // The trampoline may have been bitcast to a bogus type (FTy).
10327 // Handle this by synthesizing a new function type, equal to FTy
10328 // with the chain parameter inserted.
10330 std::vector<const Type*> NewTypes;
10331 NewTypes.reserve(FTy->getNumParams()+1);
10333 // Insert the chain's type into the list of parameter types, which may
10334 // mean appending it.
10337 FunctionType::param_iterator I = FTy->param_begin(),
10338 E = FTy->param_end();
10341 if (Idx == NestIdx)
10342 // Add the chain's type.
10343 NewTypes.push_back(NestTy);
10348 // Add the original type.
10349 NewTypes.push_back(*I);
10355 // Replace the trampoline call with a direct call. Let the generic
10356 // code sort out any function type mismatches.
10357 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
10359 Constant *NewCallee =
10360 NestF->getType() == PointerType::getUnqual(NewFTy) ?
10361 NestF : ConstantExpr::getBitCast(NestF,
10362 PointerType::getUnqual(NewFTy));
10363 const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
10366 Instruction *NewCaller;
10367 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10368 NewCaller = InvokeInst::Create(NewCallee,
10369 II->getNormalDest(), II->getUnwindDest(),
10370 NewArgs.begin(), NewArgs.end(),
10371 Caller->getName(), Caller);
10372 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
10373 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
10375 NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
10376 Caller->getName(), Caller);
10377 if (cast<CallInst>(Caller)->isTailCall())
10378 cast<CallInst>(NewCaller)->setTailCall();
10379 cast<CallInst>(NewCaller)->
10380 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
10381 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
10383 if (Caller->getType() != Type::getVoidTy(*Context) && !Caller->use_empty())
10384 Caller->replaceAllUsesWith(NewCaller);
10385 Caller->eraseFromParent();
10386 Worklist.Remove(Caller);
10391 // Replace the trampoline call with a direct call. Since there is no 'nest'
10392 // parameter, there is no need to adjust the argument list. Let the generic
10393 // code sort out any function type mismatches.
10394 Constant *NewCallee =
10395 NestF->getType() == PTy ? NestF :
10396 ConstantExpr::getBitCast(NestF, PTy);
10397 CS.setCalledFunction(NewCallee);
10398 return CS.getInstruction();
10401 /// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
10402 /// and if a/b/c and the adds all have a single use, turn this into a phi
10403 /// and a single binop.
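/// Illustrative sketch (assumed IR, not from the source):
///   %a1 = add i32 %x, %b      ; in %bb1
///   %a2 = add i32 %x, %c      ; in %bb2
///   %p  = phi i32 [ %a1, %bb1 ], [ %a2, %bb2 ]
/// becomes
///   %rp = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %p  = add i32 %x, %rp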
10404 Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
10405 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
10406 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
10407 unsigned Opc = FirstInst->getOpcode();
10408 Value *LHSVal = FirstInst->getOperand(0);
10409 Value *RHSVal = FirstInst->getOperand(1);
10411 const Type *LHSType = LHSVal->getType();
10412 const Type *RHSType = RHSVal->getType();
10414 // Scan to see if all operands are the same opcode, and all have one use.
10415 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
10416 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
10417 if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
10418 // Verify type of the LHS matches so we don't fold cmp's of different
10419 // types or GEP's with different index types.
10420 I->getOperand(0)->getType() != LHSType ||
10421 I->getOperand(1)->getType() != RHSType)
10424 // If they are CmpInst instructions, check their predicates
10425 if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
10426 if (cast<CmpInst>(I)->getPredicate() !=
10427 cast<CmpInst>(FirstInst)->getPredicate())
10430 // Keep track of which operand needs a phi node.
10431 if (I->getOperand(0) != LHSVal) LHSVal = 0;
10432 if (I->getOperand(1) != RHSVal) RHSVal = 0;
10435 // If both LHS and RHS would need a PHI, don't do this transformation,
10436 // because it would increase the number of PHIs entering the block,
10437 // which leads to higher register pressure. This is especially
10438 // bad when the PHIs are in the header of a loop.
10439 if (!LHSVal && !RHSVal)
10442 // Otherwise, this is safe to transform!
10444 Value *InLHS = FirstInst->getOperand(0);
10445 Value *InRHS = FirstInst->getOperand(1);
10446 PHINode *NewLHS = 0, *NewRHS = 0;
10448 NewLHS = PHINode::Create(LHSType,
10449 FirstInst->getOperand(0)->getName() + ".pn");
10450 NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
10451 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
10452 InsertNewInstBefore(NewLHS, PN);
10457 NewRHS = PHINode::Create(RHSType,
10458 FirstInst->getOperand(1)->getName() + ".pn");
10459 NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
10460 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
10461 InsertNewInstBefore(NewRHS, PN);
10465 // Add all operands to the new PHIs.
10466 if (NewLHS || NewRHS) {
10467 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10468 Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
10470 Value *NewInLHS = InInst->getOperand(0);
10471 NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
10474 Value *NewInRHS = InInst->getOperand(1);
10475 NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
10480 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
10481 return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
10482 CmpInst *CIOp = cast<CmpInst>(FirstInst);
10483 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
10487 Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
10488 GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
10490 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
10491 FirstInst->op_end());
10492 // This is true if all GEP bases are allocas and if all indices into them are constants.
10494 bool AllBasePointersAreAllocas = true;
10496 // We don't want to replace this phi if the replacement would require
10497 // more than one phi, which leads to higher register pressure. This is
10498 // especially bad when the PHIs are in the header of a loop.
10499 bool NeededPhi = false;
10501 // Scan to see if all operands are the same opcode, and all have one use.
10502 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
10503 GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
10504 if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
10505 GEP->getNumOperands() != FirstInst->getNumOperands())
10508 // Keep track of whether or not all GEPs are of alloca pointers.
10509 if (AllBasePointersAreAllocas &&
10510 (!isa<AllocaInst>(GEP->getOperand(0)) ||
10511 !GEP->hasAllConstantIndices()))
10512 AllBasePointersAreAllocas = false;
10514 // Compare the operand lists.
10515 for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
10516 if (FirstInst->getOperand(op) == GEP->getOperand(op))
10519 // Don't merge two GEPs when two operands differ (introducing phi nodes)
10520 // if one of the PHIs has a constant for the index. The index may be
10521 // substantially cheaper to compute for the constants, so making it a
10522 // variable index could pessimize the path. This also handles the case
10523 // for struct indices, which must always be constant.
10524 if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
10525 isa<ConstantInt>(GEP->getOperand(op)))
10528 if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
10531 // If we already needed a PHI for an earlier operand, and another operand
10532 // also requires a PHI, we'd be introducing more PHIs than we're
10533 // eliminating, which increases register pressure on entry to the PHI's block.
10538 FixedOperands[op] = 0; // Needs a PHI.
10543 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
10544 // bother doing this transformation. At best, this will just save a bit of
10545 // offset calculation, but all the predecessors will have to materialize the
10546 // stack address into a register anyway. We'd actually rather *clone* the
10547 // load up into the predecessors so that we have a load of a gep of an alloca,
10548 // which can usually all be folded into the load.
10549 if (AllBasePointersAreAllocas)
10552 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
10553 // that is variable.
10554 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
10556 bool HasAnyPHIs = false;
10557 for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
10558 if (FixedOperands[i]) continue; // operand doesn't need a phi.
10559 Value *FirstOp = FirstInst->getOperand(i);
10560 PHINode *NewPN = PHINode::Create(FirstOp->getType(),
10561 FirstOp->getName()+".pn");
10562 InsertNewInstBefore(NewPN, PN);
10564 NewPN->reserveOperandSpace(e);
10565 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
10566 OperandPhis[i] = NewPN;
10567 FixedOperands[i] = NewPN;
10572 // Add all operands to the new PHIs.
10574 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10575 GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
10576 BasicBlock *InBB = PN.getIncomingBlock(i);
10578 for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
10579 if (PHINode *OpPhi = OperandPhis[op])
10580 OpPhi->addIncoming(InGEP->getOperand(op), InBB);
10584 Value *Base = FixedOperands[0];
10585 return cast<GEPOperator>(FirstInst)->isInBounds() ?
10586 GetElementPtrInst::CreateInBounds(Base, FixedOperands.begin()+1,
10587 FixedOperands.end()) :
10588 GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
10589 FixedOperands.end());
10593 /// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
10594 /// sink the load out of the block that defines it. This means that it must be
10595 /// obvious the value of the load is not changed from the point of the load to
10596 /// the end of the block it is in.
10598 /// Finally, it is safe, but not profitable, to sink a load targeting a
10599 /// non-address-taken alloca. Doing so will cause us to not promote the alloca to a register.
10601 static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
10602 BasicBlock::iterator BBI = L, E = L->getParent()->end();
10604 for (++BBI; BBI != E; ++BBI)
10605 if (BBI->mayWriteToMemory())
10608 // Check for non-address taken alloca. If not address-taken already, it isn't
10609 // profitable to do this xform.
10610 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
10611 bool isAddressTaken = false;
10612 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
10614 if (isa<LoadInst>(UI)) continue;
10615 if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
10616 // If storing TO the alloca, then the address isn't taken.
10617 if (SI->getOperand(1) == AI) continue;
10619 isAddressTaken = true;
10623 if (!isAddressTaken && AI->isStaticAlloca())
10627 // If this load is a load from a GEP with a constant offset from an alloca,
10628 // then we don't want to sink it. In its present form, it will be
10629 // load [constant stack offset]. Sinking it will cause us to have to
10630 // materialize the stack addresses in each predecessor in a register only to
10631 // do a shared load from register in the successor.
10632 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
10633 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
10634 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
10641 // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
10642 // operator and they all are only used by the PHI, PHI together their
10643 // inputs, and do the operation once, to the result of the PHI.
10644 Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
10645 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
10647 // Scan the instruction, looking for input operations that can be folded away.
10648 // If all input operands to the phi are the same instruction (e.g. a cast from
10649 // the same type or "+42") we can pull the operation through the PHI, reducing
10650 // code size and simplifying code.
10651 Constant *ConstantOp = 0;
10652 const Type *CastSrcTy = 0;
10653 bool isVolatile = false;
10654 if (isa<CastInst>(FirstInst)) {
10655 CastSrcTy = FirstInst->getOperand(0)->getType();
10656 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
10657 // Can fold binop, compare or shift here if the RHS is a constant,
10658 // otherwise call FoldPHIArgBinOpIntoPHI.
10659 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
10660 if (ConstantOp == 0)
10661 return FoldPHIArgBinOpIntoPHI(PN);
10662 } else if (LoadInst *LI = dyn_cast<LoadInst>(FirstInst)) {
10663 isVolatile = LI->isVolatile();
10664 // We can't sink the load if the loaded value could be modified between the
10665 // load and the PHI.
10666 if (LI->getParent() != PN.getIncomingBlock(0) ||
10667 !isSafeAndProfitableToSinkLoad(LI))
10670 // If the PHI is of volatile loads and the load block has multiple
10671 // successors, sinking it would remove a load of the volatile value from
10672 // the path through the other successor.
10674 LI->getParent()->getTerminator()->getNumSuccessors() != 1)
10677 } else if (isa<GetElementPtrInst>(FirstInst)) {
10678 return FoldPHIArgGEPIntoPHI(PN);
10680 return 0; // Cannot fold this operation.
10683 // Check to see if all arguments are the same operation.
10684 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10685 if (!isa<Instruction>(PN.getIncomingValue(i))) return 0;
10686 Instruction *I = cast<Instruction>(PN.getIncomingValue(i));
10687 if (!I->hasOneUse() || !I->isSameOperationAs(FirstInst))
10690 if (I->getOperand(0)->getType() != CastSrcTy)
10691 return 0; // Cast operation must match.
10692 } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
10693 // We can't sink the load if the loaded value could be modified between
10694 // the load and the PHI.
10695 if (LI->isVolatile() != isVolatile ||
10696 LI->getParent() != PN.getIncomingBlock(i) ||
10697 !isSafeAndProfitableToSinkLoad(LI))
10700 // If the PHI is of volatile loads and the load block has multiple
10701 // successors, sinking it would remove a load of the volatile value from
10702 // the path through the other successor.
10704 LI->getParent()->getTerminator()->getNumSuccessors() != 1)
10707 } else if (I->getOperand(1) != ConstantOp) {
10712 // Okay, they are all the same operation. Create a new PHI node of the
10713 // correct type, and PHI together all of the LHS's of the instructions.
10714 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
10715 PN.getName()+".in");
10716 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
10718 Value *InVal = FirstInst->getOperand(0);
10719 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
10721 // Add all operands to the new PHI.
10722 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10723 Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
10724 if (NewInVal != InVal)
10726 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
10731 // The new PHI unions all of the same values together. This is really
10732 // common, so we handle it intelligently here for compile-time speed.
10736 InsertNewInstBefore(NewPN, PN);
10740 // Insert and return the new operation.
10741 if (CastInst* FirstCI = dyn_cast<CastInst>(FirstInst))
10742 return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
10743 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
10744 return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
10745 if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
10746 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
10747 PhiVal, ConstantOp);
10748 assert(isa<LoadInst>(FirstInst) && "Unknown operation");
10750 // If this was a volatile load that we are merging, make sure to loop through
10751 // and mark all the input loads as non-volatile. If we don't do this, we will
10752 // insert a new volatile load and the old ones will not be deletable.
10754 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
10755 cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
10757 return new LoadInst(PhiVal, "", isVolatile);
10760 /// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle that is dead.
10762 static bool DeadPHICycle(PHINode *PN,
10763 SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
10764 if (PN->use_empty()) return true;
10765 if (!PN->hasOneUse()) return false;
10767 // Remember this node, and if we find the cycle, return.
10768 if (!PotentiallyDeadPHIs.insert(PN))
10771 // Don't scan crazily complex things.
10772 if (PotentiallyDeadPHIs.size() == 16)
10775 if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
10776 return DeadPHICycle(PU, PotentiallyDeadPHIs);
10781 /// PHIsEqualValue - Return true if this phi node is always equal to
10782 /// NonPhiInVal. This happens with mutually cyclic phi nodes like:
10783 /// z = some value; x = phi (y, z); y = phi (x, z)
10784 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
10785 SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
10786 // See if we already saw this PHI node.
10787 if (!ValueEqualPHIs.insert(PN))
10790 // Don't scan crazily complex things.
10791 if (ValueEqualPHIs.size() == 16)
10794 // Scan the operands to see if they are either phi nodes or are equal to the value.
10796 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
10797 Value *Op = PN->getIncomingValue(i);
10798 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
10799 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
10801 } else if (Op != NonPhiInVal)
10809 // PHINode simplification
10811 Instruction *InstCombiner::visitPHINode(PHINode &PN) {
10812 // If LCSSA is around, don't mess with Phi nodes
10813 if (MustPreserveLCSSA) return 0;
10815 if (Value *V = PN.hasConstantValue())
10816 return ReplaceInstUsesWith(PN, V);
10818 // If all PHI operands are the same operation, pull them through the PHI,
10819 // reducing code size.
10820 if (isa<Instruction>(PN.getIncomingValue(0)) &&
10821 isa<Instruction>(PN.getIncomingValue(1)) &&
10822 cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
10823 cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
10824 // FIXME: The hasOneUse check will fail for PHIs that use the value more
10825 // than themselves more than once.
10826 PN.getIncomingValue(0)->hasOneUse())
10827 if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
10830 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
10831 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
10832 // PHI)... break the cycle.
10833 if (PN.hasOneUse()) {
10834 Instruction *PHIUser = cast<Instruction>(PN.use_back());
10835 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
10836 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
10837 PotentiallyDeadPHIs.insert(&PN);
10838 if (DeadPHICycle(PU, PotentiallyDeadPHIs))
10839 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
10842 // If this phi has a single use, and if that use just computes a value for
10843 // the next iteration of a loop, delete the phi. This occurs with unused
10844 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
10845 // common case here is good because the only other things that catch this
10846 // are induction variable analysis (sometimes) and ADCE, which is only run late.
10848 if (PHIUser->hasOneUse() &&
10849 (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
10850 PHIUser->use_back() == &PN) {
10851 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
10855 // We sometimes end up with phi cycles that non-obviously end up being the
10856 // same value, for example:
10857 // z = some value; x = phi (y, z); y = phi (x, z)
10858 // where the phi nodes don't necessarily need to be in the same block. Do a
10859 // quick check to see if the PHI node only contains a single non-phi value, if
10860 // so, scan to see if the phi cycle is actually equal to that value.
10862 unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
10863 // Scan for the first non-phi operand.
10864 while (InValNo != NumOperandVals &&
10865 isa<PHINode>(PN.getIncomingValue(InValNo)))
10868 if (InValNo != NumOperandVals) {
10869 Value *NonPhiInVal = PN.getOperand(InValNo);
10871 // Scan the rest of the operands to see if there are any conflicts, if so
10872 // there is no need to recursively scan other phis.
10873 for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
10874 Value *OpVal = PN.getIncomingValue(InValNo);
10875 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
10879 // If we scanned over all operands, then we have one unique value plus
10880 // phi values. Scan PHI nodes to see if they all merge in each other or the value.
10882 if (InValNo == NumOperandVals) {
10883 SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
10884 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
10885 return ReplaceInstUsesWith(PN, NonPhiInVal);
10892 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
10893 Value *PtrOp = GEP.getOperand(0);
10894 // Eliminate 'getelementptr %P, i32 0' and 'getelementptr %P', they are noops.
10895 if (GEP.getNumOperands() == 1)
10896 return ReplaceInstUsesWith(GEP, PtrOp);
10898 if (isa<UndefValue>(GEP.getOperand(0)))
10899 return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
10901 bool HasZeroPointerIndex = false;
10902 if (Constant *C = dyn_cast<Constant>(GEP.getOperand(1)))
10903 HasZeroPointerIndex = C->isNullValue();
10905 if (GEP.getNumOperands() == 2 && HasZeroPointerIndex)
10906 return ReplaceInstUsesWith(GEP, PtrOp);
10908 // Eliminate unneeded casts for indices.
10910 bool MadeChange = false;
10911 unsigned PtrSize = TD->getPointerSizeInBits();
10913 gep_type_iterator GTI = gep_type_begin(GEP);
10914 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
10915 I != E; ++I, ++GTI) {
10916 if (!isa<SequentialType>(*GTI)) continue;
10918 // If we are using a wider index than needed for this platform, shrink it
10919 // to what we need. If narrower, sign-extend it to what we need. This
10920 // explicit cast can make subsequent optimizations more obvious.
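// Illustrative sketch (assumed IR, not from the source): on a target with
// 64-bit pointers,
//   getelementptr i32* %p, i16 %i
// is canonicalized so the index is first widened:
//   %i.ext = sext i16 %i to i64
//   getelementptr i32* %p, i64 %i.ext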
10921 unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
10922 if (OpBits == PtrSize)
10925 *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
10928 if (MadeChange) return &GEP;
10931 // Combine Indices - If the source pointer to this getelementptr instruction
10932 // is a getelementptr instruction, combine the indices of the two
10933 // getelementptr instructions into a single instruction.
10935 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
10936 // Note that if our source is a gep chain itself, we wait for that
10937 // chain to be resolved before we perform this transformation. This
10938 // avoids us creating a TON of code in some cases.
10940 if (GetElementPtrInst *SrcGEP =
10941 dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
10942 if (SrcGEP->getNumOperands() == 2)
10943 return 0; // Wait until our source is folded to completion.
10945 SmallVector<Value*, 8> Indices;
10947 // Find out whether the last index in the source GEP is a sequential idx.
10948 bool EndsWithSequential = false;
10949 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
10951 EndsWithSequential = !isa<StructType>(*I);
10953 // Can we combine the two pointer arithmetics offsets?
10954 if (EndsWithSequential) {
10955 // Replace: gep (gep %P, long B), long A, ...
10956 // With: T = long A+B; gep %P, T, ...
10959 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
10960 Value *GO1 = GEP.getOperand(1);
10961 if (SO1 == Constant::getNullValue(SO1->getType())) {
10963 } else if (GO1 == Constant::getNullValue(GO1->getType())) {
10966 // If they aren't the same type, then the input hasn't been processed
10967 // by the loop above yet (which canonicalizes sequential index types to
10968 // intptr_t). Just avoid transforming this until the input has been processed.
10970 if (SO1->getType() != GO1->getType())
10972 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
10975 // Update the GEP in place if possible.
10976 if (Src->getNumOperands() == 2) {
10977 GEP.setOperand(0, Src->getOperand(0));
10978 GEP.setOperand(1, Sum);
10981 Indices.append(Src->op_begin()+1, Src->op_end()-1);
10982 Indices.push_back(Sum);
10983 Indices.append(GEP.op_begin()+2, GEP.op_end());
10984 } else if (isa<Constant>(*GEP.idx_begin()) &&
10985 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
10986 Src->getNumOperands() != 1) {
10987 // Otherwise we can do the fold if the first index of the GEP is a zero
10988 Indices.append(Src->op_begin()+1, Src->op_end());
10989 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
10992 if (!Indices.empty())
10993 return (cast<GEPOperator>(&GEP)->isInBounds() &&
10994 Src->isInBounds()) ?
10995 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
10996 Indices.end(), GEP.getName()) :
10997 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
10998 Indices.end(), GEP.getName());
11001 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
11002 if (Value *X = getBitCastOperand(PtrOp)) {
11003 assert(isa<PointerType>(X->getType()) && "Must be cast from pointer");
11005 // If the input bitcast is actually "bitcast(bitcast(x))", then we don't
11006 // want to change the gep until the bitcasts are eliminated.
11007 if (getBitCastOperand(X)) {
11008 Worklist.AddValue(PtrOp);
11012 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
11013 // into : GEP [10 x i8]* X, i32 0, ...
11015 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
11016 // into : GEP i8* X, ...
11018 // This occurs when the program declares an array extern like "int X[];"
11019 if (HasZeroPointerIndex) {
11020 const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
11021 const PointerType *XTy = cast<PointerType>(X->getType());
11022 if (const ArrayType *CATy =
11023 dyn_cast<ArrayType>(CPTy->getElementType())) {
11024 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
11025 if (CATy->getElementType() == XTy->getElementType()) {
11026 // -> GEP i8* X, ...
11027 SmallVector<Value*, 8> Indices(GEP.idx_begin()+1, GEP.idx_end());
11028 return cast<GEPOperator>(&GEP)->isInBounds() ?
11029             GetElementPtrInst::CreateInBounds(X, Indices.begin(), Indices.end(),
11030                                               GEP.getName()) :
11031             GetElementPtrInst::Create(X, Indices.begin(), Indices.end(),
11032                                       GEP.getName());
11033         }
11035 if (const ArrayType *XATy = dyn_cast<ArrayType>(XTy->getElementType())){
11036 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
11037 if (CATy->getElementType() == XATy->getElementType()) {
11038 // -> GEP [10 x i8]* X, i32 0, ...
11039 // At this point, we know that the cast source type is a pointer
11040 // to an array of the same type as the destination pointer
11041 // array. Because the array type is never stepped over (there
11042 // is a leading zero) we can fold the cast into this GEP.
11043             GEP.setOperand(0, X);
11044             return &GEP;
11045           }
11046         }
11047       }
11048 } else if (GEP.getNumOperands() == 2) {
11049 // Transform things like:
11050 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
11051 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
11052 const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
11053 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
11054 if (TD && isa<ArrayType>(SrcElTy) &&
11055 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
11056 TD->getTypeAllocSize(ResElTy)) {
11057         Value *Idx[2];
11058         Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
11059 Idx[1] = GEP.getOperand(1);
11060 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
11061 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
11062 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
11063 // V and GEP are both pointer types --> BitCast
11064 return new BitCastInst(NewGEP, GEP.getType());
11067 // Transform things like:
11068 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
11069 // (where tmp = 8*tmp2) into:
11070 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
11072 if (TD && isa<ArrayType>(SrcElTy) && ResElTy == Type::getInt8Ty(*Context)) {
11073 uint64_t ArrayEltSize =
11074 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
11076 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
11077 // allow either a mul, shift, or constant here.
11078         Value *NewIdx = 0;
11079         ConstantInt *Scale = 0;
11080 if (ArrayEltSize == 1) {
11081 NewIdx = GEP.getOperand(1);
11082 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
11083 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
11084           NewIdx = ConstantInt::get(CI->getType(), 1);
11085           Scale = CI;
11086 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
11087 if (Inst->getOpcode() == Instruction::Shl &&
11088 isa<ConstantInt>(Inst->getOperand(1))) {
11089 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
11090 uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
11091             Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
11092                                      1ULL << ShAmtVal);
11093 NewIdx = Inst->getOperand(0);
11094 } else if (Inst->getOpcode() == Instruction::Mul &&
11095 isa<ConstantInt>(Inst->getOperand(1))) {
11096 Scale = cast<ConstantInt>(Inst->getOperand(1));
11097 NewIdx = Inst->getOperand(0);
11101 // If the index will be to exactly the right offset with the scale taken
11102 // out, perform the transformation. Note, we don't know whether Scale is
11103 // signed or not. We'll use unsigned version of division/modulo
11104 // operation after making sure Scale doesn't have the sign bit set.
11105 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
11106 Scale->getZExtValue() % ArrayEltSize == 0) {
11107 Scale = ConstantInt::get(Scale->getType(),
11108 Scale->getZExtValue() / ArrayEltSize);
11109 if (Scale->getZExtValue() != 1) {
11110             Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
11111                                                        false /*ZExt*/);
11112             NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
11113           }
11115           // Insert the new GEP instruction.
11116           Value *Idx[2];
11117           Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
11118           Idx[1] = NewIdx;
11119 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
11120 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
11121 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
11122 // The NewGEP must be pointer typed, so must the old one -> BitCast
11123           return new BitCastInst(NewGEP, GEP.getType());
11124         }
11125       }
11126     }
11127   }
11129 /// See if we can simplify:
11130 /// X = bitcast A* to B*
11131 /// Y = gep X, <...constant indices...>
11132 /// into a gep of the original struct. This is important for SROA and alias
11133 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
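  /// For example (illustrative), with %U of type { i32, i32 }*:
  ///   %X = bitcast { i32, i32 }* %U to i8*
  ///   %Y = getelementptr i8* %X, i32 4
  /// addresses byte offset 4, which is field 1 of %U, so it can be rewritten
  /// as a gep of %U itself (plus a bitcast if the result types differ).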
11134 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
11135     if (TD &&
11136         !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
11137 // Determine how much the GEP moves the pointer. We are guaranteed to get
11138 // a constant back from EmitGEPOffset.
11139 ConstantInt *OffsetV =
11140 cast<ConstantInt>(EmitGEPOffset(&GEP, GEP, *this));
11141 int64_t Offset = OffsetV->getSExtValue();
11143 // If this GEP instruction doesn't move the pointer, just replace the GEP
11144       // with a bitcast of the real input to the dest type.
11145       if (Offset == 0) {
11146         // If the bitcast is of an allocation, and the allocation will be
11147         // converted to match the type of the cast, don't touch this.
11148         if (isa<AllocationInst>(BCI->getOperand(0)) ||
11149             isMalloc(BCI->getOperand(0))) {
11150           // See if the bitcast simplifies, if so, don't nuke this GEP yet.
11151           if (Instruction *I = visitBitCast(*BCI)) {
11152             if (I != BCI) {
11153               I->takeName(BCI);
11154               BCI->getParent()->getInstList().insert(BCI, I);
11155               ReplaceInstUsesWith(*BCI, I);
11156             }
11157             return &GEP;
11158           }
11159         }
11160         return new BitCastInst(BCI->getOperand(0), GEP.getType());
11161       }
11163 // Otherwise, if the offset is non-zero, we need to find out if there is a
11164 // field at Offset in 'A's type. If so, we can pull the cast through the
11166 SmallVector<Value*, 8> NewIndices;
11167       const Type *InTy =
11168         cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
11169 if (FindElementAtOffset(InTy, Offset, NewIndices, TD, Context)) {
11170 Value *NGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
11171 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
11172 NewIndices.end()) :
11173           Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
11174                              NewIndices.end());
11176 if (NGEP->getType() == GEP.getType())
11177 return ReplaceInstUsesWith(GEP, NGEP);
11178 NGEP->takeName(&GEP);
11179         return new BitCastInst(NGEP, GEP.getType());
11180       }
11181     }
11182   }

11184   return 0;
11185 }
11187 Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
11188 // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
11189 if (AI.isArrayAllocation()) { // Check C != 1
11190 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
11191 const Type *NewTy =
11192 ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
11193 AllocationInst *New = 0;
11195 // Create and insert the replacement instruction...
11196 if (isa<MallocInst>(AI))
11197 New = Builder->CreateMalloc(NewTy, 0, AI.getName());
11198       else {
11199         assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
11200         New = Builder->CreateAlloca(NewTy, 0, AI.getName());
11201       }
11202 New->setAlignment(AI.getAlignment());
11204 // Scan to the end of the allocation instructions, to skip over a block of
11205 // allocas if possible...also skip interleaved debug info
11207 BasicBlock::iterator It = New;
11208 while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
11210 // Now that I is pointing to the first non-allocation-inst in the block,
11211 // insert our getelementptr instruction...
11213       Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(*Context));
11214       Value *Idx[2];
11215       Idx[0] = NullIdx;
11216       Idx[1] = NullIdx;
11217 Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
11218 New->getName()+".sub", It);
11220       // Now make everything use the getelementptr instead of the original
11221       // allocation.
11222 return ReplaceInstUsesWith(AI, V);
11223 } else if (isa<UndefValue>(AI.getArraySize())) {
11224 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
11228 if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
11229 // If alloca'ing a zero byte object, replace the alloca with a null pointer.
11230 // Note that we only do this for alloca's, because malloc should allocate
11231 // and return a unique pointer, even for a zero byte allocation.
11232 if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
11233 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
11235 // If the alignment is 0 (unspecified), assign it the preferred alignment.
11236 if (AI.getAlignment() == 0)
11237       AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
11238   }

11240   return 0;
11241 }
11243 Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
11244 Value *Op = FI.getOperand(0);
11246 // free undef -> unreachable.
11247 if (isa<UndefValue>(Op)) {
11248 // Insert a new store to null because we cannot modify the CFG here.
11249 new StoreInst(ConstantInt::getTrue(*Context),
11250 UndefValue::get(Type::getInt1PtrTy(*Context)), &FI);
11251 return EraseInstFromFunction(FI);
11254 // If we have 'free null' delete the instruction. This can happen in stl code
11255 // when lots of inlining happens.
11256 if (isa<ConstantPointerNull>(Op))
11257 return EraseInstFromFunction(FI);
11259 // Change free <ty>* (cast <ty2>* X to <ty>*) into free <ty2>* X
11260 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op)) {
11261     FI.setOperand(0, CI->getOperand(0));
11262     return &FI;
11263   }
11265 // Change free (gep X, 0,0,0,0) into free(X)
11266 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
11267 if (GEPI->hasAllZeroIndices()) {
11268 Worklist.Add(GEPI);
11269       FI.setOperand(0, GEPI->getOperand(0));
11270       return &FI;
11271     }
11272   }
11274 // Change free(malloc) into nothing, if the malloc has a single use.
11275 if (MallocInst *MI = dyn_cast<MallocInst>(Op))
11276 if (MI->hasOneUse()) {
11277 EraseInstFromFunction(FI);
11278 return EraseInstFromFunction(*MI);
11280 if (isMalloc(Op)) {
11281 if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
11282 if (Op->hasOneUse() && CI->hasOneUse()) {
11283 EraseInstFromFunction(FI);
11284 EraseInstFromFunction(*CI);
11285         return EraseInstFromFunction(*cast<Instruction>(Op));
11286       }
11287     } else {
11288 // Op is a call to malloc
11289 if (Op->hasOneUse()) {
11290 EraseInstFromFunction(FI);
11291         return EraseInstFromFunction(*cast<Instruction>(Op));
11292       }
11293     }
11294   }

11296   return 0;
11297 }
11300 /// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
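/// For example (illustrative):
///   %V = load i32* (bitcast float* %P to i32*)
/// can become a load of float* %P followed by a bitcast of the loaded float
/// to i32, since both element types have the same size.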
11301 static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
11302 const TargetData *TD) {
11303 User *CI = cast<User>(LI.getOperand(0));
11304 Value *CastOp = CI->getOperand(0);
11305 LLVMContext *Context = IC.getContext();
11308 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(CI)) {
11309 // Instead of loading constant c string, use corresponding integer value
11310 // directly if string length is small enough.
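    // e.g. (illustrative): loading an i32 through a pointer to the constant
    // string "abc" (three characters plus the trailing NUL byte) folds to the
    // i32 constant holding those four bytes in the target's byte order.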
11311     std::string Str;
11312     if (GetConstantStringInfo(CE->getOperand(0), Str) && !Str.empty()) {
11313 unsigned len = Str.length();
11314 const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
11315 unsigned numBits = Ty->getPrimitiveSizeInBits();
11316 // Replace LI with immediate integer store.
11317 if ((numBits >> 3) == len + 1) {
11318 APInt StrVal(numBits, 0);
11319 APInt SingleChar(numBits, 0);
11320 if (TD->isLittleEndian()) {
11321 for (signed i = len-1; i >= 0; i--) {
11322 SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
11323             StrVal = (StrVal << 8) | SingleChar;
11324           }
11325         } else {
11326           for (unsigned i = 0; i < len; i++) {
11327             SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
11328             StrVal = (StrVal << 8) | SingleChar;
11329           }
11330           // Append NULL at the end.
11331           SingleChar = 0;
11332           StrVal = (StrVal << 8) | SingleChar;
11333         }
11334 Value *NL = ConstantInt::get(*Context, StrVal);
11335 return IC.ReplaceInstUsesWith(LI, NL);
11341 const PointerType *DestTy = cast<PointerType>(CI->getType());
11342 const Type *DestPTy = DestTy->getElementType();
11343 if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
11345 // If the address spaces don't match, don't eliminate the cast.
11346     if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
11347       return 0;
11349 const Type *SrcPTy = SrcTy->getElementType();
11351 if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
11352 isa<VectorType>(DestPTy)) {
11353 // If the source is an array, the code below will not succeed. Check to
11354       // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
11355       // constants.
11356 if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
11357 if (Constant *CSrc = dyn_cast<Constant>(CastOp))
11358 if (ASrcTy->getNumElements() != 0) {
11359             Value *Idxs[2];
11360             Idxs[0] = Idxs[1] = Constant::getNullValue(Type::getInt32Ty(*Context));
11361 CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
11362 SrcTy = cast<PointerType>(CastOp->getType());
11363 SrcPTy = SrcTy->getElementType();
11366 if (IC.getTargetData() &&
11367 (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
11368 isa<VectorType>(SrcPTy)) &&
11369 // Do not allow turning this into a load of an integer, which is then
11370 // casted to a pointer, this pessimizes pointer analysis a lot.
11371 (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
11372 IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
11373 IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
11375 // Okay, we are casting from one integer or pointer type to another of
11376 // the same size. Instead of casting the pointer before the load, cast
11377 // the result of the loaded value.
11378       Value *NewLoad =
11379         IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
11380 // Now cast the result of the load.
11381 return new BitCastInst(NewLoad, LI.getType());
11388 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
11389 Value *Op = LI.getOperand(0);
11391 // Attempt to improve the alignment.
11392   if (TD) {
11393     unsigned KnownAlign =
11394       GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
11395     if (KnownAlign >
11396         (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
11397                                   LI.getAlignment()))
11398       LI.setAlignment(KnownAlign);
11399   }
11401 // load (cast X) --> cast (load X) iff safe.
11402 if (isa<CastInst>(Op))
11403     if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
11404       return Res;
11406 // None of the following transforms are legal for volatile loads.
11407 if (LI.isVolatile()) return 0;
11409 // Do really simple store-to-load forwarding and load CSE, to catch cases
11410   // where there are several consecutive memory accesses to the same location,
11411 // separated by a few arithmetic operations.
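  // e.g.:  %x = load i32* %P
  //        %a = add i32 %x, 1        ; no intervening stores or calls
  //        %y = load i32* %P         ; %y can simply reuse %x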
11412 BasicBlock::iterator BBI = &LI;
11413 if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
11414 return ReplaceInstUsesWith(LI, AvailableVal);
11416 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
11417 const Value *GEPI0 = GEPI->getOperand(0);
11418 // TODO: Consider a target hook for valid address spaces for this xform.
11419 if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
11420 // Insert a new store to null instruction before the load to indicate
11421 // that this code is not reachable. We do this instead of inserting
11422       // an unreachable instruction directly because we cannot modify the
11423       // CFG.
11424 new StoreInst(UndefValue::get(LI.getType()),
11425 Constant::getNullValue(Op->getType()), &LI);
11426 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11430 if (Constant *C = dyn_cast<Constant>(Op)) {
11431 // load null/undef -> undef
11432 // TODO: Consider a target hook for valid address spaces for this xform.
11433 if (isa<UndefValue>(C) ||
11434 (C->isNullValue() && LI.getPointerAddressSpace() == 0)) {
11435 // Insert a new store to null instruction before the load to indicate that
11436 // this code is not reachable. We do this instead of inserting an
11437 // unreachable instruction directly because we cannot modify the CFG.
11438 new StoreInst(UndefValue::get(LI.getType()),
11439 Constant::getNullValue(Op->getType()), &LI);
11440 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11443 // Instcombine load (constant global) into the value loaded.
11444 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op))
11445 if (GV->isConstant() && GV->hasDefinitiveInitializer())
11446 return ReplaceInstUsesWith(LI, GV->getInitializer());
11448 // Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded.
11449 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op)) {
11450 if (CE->getOpcode() == Instruction::GetElementPtr) {
11451 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
11452 if (GV->isConstant() && GV->hasDefinitiveInitializer())
11453           if (Constant *V =
11454                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
11455 return ReplaceInstUsesWith(LI, V);
11456 if (CE->getOperand(0)->isNullValue()) {
11457 // Insert a new store to null instruction before the load to indicate
11458 // that this code is not reachable. We do this instead of inserting
11459           // an unreachable instruction directly because we cannot modify the
11460           // CFG.
11461 new StoreInst(UndefValue::get(LI.getType()),
11462 Constant::getNullValue(Op->getType()), &LI);
11463 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11466 } else if (CE->isCast()) {
11467       if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
11468         return Res;
11469     }
11470   }
11473 // If this load comes from anywhere in a constant global, and if the global
11474 // is all undef or zero, we know what it loads.
11475 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op->getUnderlyingObject())){
11476 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
11477 if (GV->getInitializer()->isNullValue())
11478 return ReplaceInstUsesWith(LI, Constant::getNullValue(LI.getType()));
11479 else if (isa<UndefValue>(GV->getInitializer()))
11480 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11484 if (Op->hasOneUse()) {
11485 // Change select and PHI nodes to select values instead of addresses: this
11486     // helps alias analysis out a lot, allows many other simplifications, and
11487 // exposes redundancy in the code.
11489 // Note that we cannot do the transformation unless we know that the
11490 // introduced loads cannot trap! Something like this is valid as long as
11491 // the condition is always false: load (select bool %C, int* null, int* %G),
11492 // but it would not be valid if we transformed it to load from null
11493 // unconditionally.
11495 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
11496 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
11497 if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
11498 isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
11499 Value *V1 = Builder->CreateLoad(SI->getOperand(1),
11500 SI->getOperand(1)->getName()+".val");
11501 Value *V2 = Builder->CreateLoad(SI->getOperand(2),
11502 SI->getOperand(2)->getName()+".val");
11503 return SelectInst::Create(SI->getCondition(), V1, V2);
11506 // load (select (cond, null, P)) -> load P
11507 if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
11508 if (C->isNullValue()) {
11509           LI.setOperand(0, SI->getOperand(2));
11510           return &LI;
11511         }
11513 // load (select (cond, P, null)) -> load P
11514 if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
11515 if (C->isNullValue()) {
11516           LI.setOperand(0, SI->getOperand(1));
11517           return &LI;
11518         }
11519     }
11520   }
11521   return 0;
11522 }
11524 /// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
11525 /// when possible. This makes it generally easy to do alias analysis and/or
11526 /// SROA/mem2reg of the memory object.
11527 static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
11528 User *CI = cast<User>(SI.getOperand(1));
11529 Value *CastOp = CI->getOperand(0);
11531 const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
11532 const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
11533 if (SrcTy == 0) return 0;
11535 const Type *SrcPTy = SrcTy->getElementType();
11537   if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
11538     return 0;
11540 /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
11541 /// to its first element. This allows us to handle things like:
11542 /// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
11543 /// on 32-bit hosts.
11544 SmallVector<Value*, 4> NewGEPIndices;
11546 // If the source is an array, the code below will not succeed. Check to
11547   // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
11548   // constants.
11549 if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
11550 // Index through pointer.
11551 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(*IC.getContext()));
11552 NewGEPIndices.push_back(Zero);
11554     while (1) {
11555       if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
11556         if (!STy->getNumElements()) /* Struct can be empty {} */
11557           break;
11558         NewGEPIndices.push_back(Zero);
11559         SrcPTy = STy->getElementType(0);
11560       } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
11561         NewGEPIndices.push_back(Zero);
11562         SrcPTy = ATy->getElementType();
11563       } else {
11564         break;
11565       }
11566     }
11568 SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
11571   if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
11572     return 0;
11574 // If the pointers point into different address spaces or if they point to
11575 // values with different sizes, we can't do the transformation.
11576 if (!IC.getTargetData() ||
11577 SrcTy->getAddressSpace() !=
11578 cast<PointerType>(CI->getType())->getAddressSpace() ||
11579 IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
11580       IC.getTargetData()->getTypeSizeInBits(DestPTy))
11581     return 0;
11583 // Okay, we are casting from one integer or pointer type to another of
11584 // the same size. Instead of casting the pointer before
11585 // the store, cast the value to be stored.
11587 Value *SIOp0 = SI.getOperand(0);
11588 Instruction::CastOps opcode = Instruction::BitCast;
11589 const Type* CastSrcTy = SIOp0->getType();
11590 const Type* CastDstTy = SrcPTy;
11591 if (isa<PointerType>(CastDstTy)) {
11592 if (CastSrcTy->isInteger())
11593 opcode = Instruction::IntToPtr;
11594 } else if (isa<IntegerType>(CastDstTy)) {
11595 if (isa<PointerType>(SIOp0->getType()))
11596 opcode = Instruction::PtrToInt;
11599 // SIOp0 is a pointer to aggregate and this is a store to the first field,
11600 // emit a GEP to index into its first field.
11601 if (!NewGEPIndices.empty())
11602 CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
11603 NewGEPIndices.end());
11605   Value *NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
11606                                           SIOp0->getName()+".c");
11607 return new StoreInst(NewCast, CastOp);
11610 /// equivalentAddressValues - Test if A and B will obviously have the same
11611 /// value. This includes recognizing that %t0 and %t1 will have the same
11612 /// value in code like this:
11613 /// %t0 = getelementptr \@a, 0, 3
11614 /// store i32 0, i32* %t0
11615 /// %t1 = getelementptr \@a, 0, 3
11616 /// %t2 = load i32* %t1
11618 static bool equivalentAddressValues(Value *A, Value *B) {
11619 // Test if the values are trivially equivalent.
11620 if (A == B) return true;
11622   // Test if the values come from identical arithmetic instructions.
11623   // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
11624   // it's only used to compare two uses within the same basic block, which
11625 // means that they'll always either have the same value or one of them
11626 // will have an undefined value.
11627 if (isa<BinaryOperator>(A) ||
11628 isa<CastInst>(A) ||
11629       isa<PHINode>(A) ||
11630       isa<GetElementPtrInst>(A))
11631 if (Instruction *BI = dyn_cast<Instruction>(B))
11632         if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
11633           return true;

11635   // Otherwise they may not be equivalent.
11636   return false;
11637 }
11639 // If this instruction has two uses, one of which is a llvm.dbg.declare,
11640 // return the llvm.dbg.declare.
11641 DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
11642   if (!V->hasNUses(2))
11643     return 0;
11644   for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
11645        UI != E; ++UI) {
11646     if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
11647       return DI;
11648     if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
11649       if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
11650         return DI;
11651     }
11652   }
11653   return 0;
11654 }
11656 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
11657 Value *Val = SI.getOperand(0);
11658 Value *Ptr = SI.getOperand(1);
11660 if (isa<UndefValue>(Ptr)) { // store X, undef -> noop (even if volatile)
11661 EraseInstFromFunction(SI);
11666   // If the RHS is an alloca with a single use, zapify the store, making the
11667   // alloca dead.
11668   // If the RHS is an alloca with two uses, the other one being a
11669   // llvm.dbg.declare, zapify the store and the declare, making the
11670   // alloca dead. We must do this to prevent declare's from affecting
11671   // codegen.
11672 if (!SI.isVolatile()) {
11673 if (Ptr->hasOneUse()) {
11674 if (isa<AllocaInst>(Ptr)) {
11675         EraseInstFromFunction(SI);
11676         ++NumCombined;
11677         return 0;
11678       }
11679 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
11680 if (isa<AllocaInst>(GEP->getOperand(0))) {
11681 if (GEP->getOperand(0)->hasOneUse()) {
11682             EraseInstFromFunction(SI);
11683             ++NumCombined;
11684             return 0;
11685           }
11686 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
11687 EraseInstFromFunction(*DI);
11688             EraseInstFromFunction(SI);
11689             ++NumCombined;
11690             return 0;
11691           }
11692         }
11693       }
11694     }
11695 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
11696 EraseInstFromFunction(*DI);
11697       EraseInstFromFunction(SI);
11698       ++NumCombined;
11699       return 0;
11700     }
11701   }
11703 // Attempt to improve the alignment.
11704   if (TD) {
11705     unsigned KnownAlign =
11706       GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
11707     if (KnownAlign >
11708         (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
11709                                   SI.getAlignment()))
11710       SI.setAlignment(KnownAlign);
11711   }
11713 // Do really simple DSE, to catch cases where there are several consecutive
11714 // stores to the same location, separated by a few arithmetic operations. This
11715 // situation often occurs with bitfield accesses.
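  // e.g.:  store i32 %a, i32* %P
  //        %x = or i32 %b, 7         ; does not touch memory
  //        store i32 %x, i32* %P     ; makes the first store dead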
11716 BasicBlock::iterator BBI = &SI;
11717   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
11718        --ScanInsts) {
11719     --BBI;
11720 // Don't count debug info directives, lest they affect codegen,
11721 // and we skip pointer-to-pointer bitcasts, which are NOPs.
11722 // It is necessary for correctness to skip those that feed into a
11723 // llvm.dbg.declare, as these are not present when debugging is off.
11724 if (isa<DbgInfoIntrinsic>(BBI) ||
11725         (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
11726       ScanInsts++;
11727       continue;
11728     }
11730 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
11731 // Prev store isn't volatile, and stores to the same location?
11732 if (!PrevSI->isVolatile() &&equivalentAddressValues(PrevSI->getOperand(1),
11733 SI.getOperand(1))) {
11736 EraseInstFromFunction(*PrevSI);
11742 // If this is a load, we have to stop. However, if the loaded value is from
11743 // the pointer we're loading and is producing the pointer we're storing,
11744 // then *this* store is dead (X = load P; store X -> P).
11745 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
11746 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
11747 !SI.isVolatile()) {
11748         EraseInstFromFunction(SI);
11749         ++NumCombined;
11750         return 0;
11751       }
11752 // Otherwise, this is a load from some other location. Stores before it
11753 // may not be dead.
11757 // Don't skip over loads or things that can modify memory.
11758 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
11763 if (SI.isVolatile()) return 0; // Don't hack volatile stores.
11765 // store X, null -> turns into 'unreachable' in SimplifyCFG
11766 if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
11767 if (!isa<UndefValue>(Val)) {
11768 SI.setOperand(0, UndefValue::get(Val->getType()));
11769 if (Instruction *U = dyn_cast<Instruction>(Val))
11770 Worklist.Add(U); // Dropped a use.
11773 return 0; // Do not modify these!
11776 // store undef, Ptr -> noop
11777 if (isa<UndefValue>(Val)) {
11778 EraseInstFromFunction(SI);
11783   // If the pointer destination is a cast, see if we can fold the cast into the
11784   // store instead.
11785   if (isa<CastInst>(Ptr))
11786     if (Instruction *Res = InstCombineStoreToCast(*this, SI))
11787       return Res;
11788   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
11789     if (CE->isCast())
11790       if (Instruction *Res = InstCombineStoreToCast(*this, SI))
11791         return Res;
11794 // If this store is the last instruction in the basic block (possibly
11795 // excepting debug info instructions and the pointer bitcasts that feed
11796 // into them), and if the block ends with an unconditional branch, try
11797   // to move it to the successor block.
11798   BBI = &SI;
11799   do {
11800     ++BBI;
11801   } while (isa<DbgInfoIntrinsic>(BBI) ||
11802 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
11803 if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
11804 if (BI->isUnconditional())
11805 if (SimplifyStoreAtEndOfBlock(SI))
11806         return 0;  // xform done!

11808   return 0;
11809 }
11811 /// SimplifyStoreAtEndOfBlock - Turn things like:
11812 /// if () { *P = v1; } else { *P = v2 }
11813 /// into a phi node with a store in the successor.
11815 /// Simplify things like:
11816 /// *P = v1; if () { *P = v2; }
11817 /// into a phi node with a store in the successor.
11819 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
11820 BasicBlock *StoreBB = SI.getParent();
11822 // Check to see if the successor block has exactly two incoming edges. If
11823 // so, see if the other predecessor contains a store to the same location.
11824 // if so, insert a PHI node (if needed) and move the stores down.
11825 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
11827 // Determine whether Dest has exactly two predecessors and, if so, compute
11828 // the other predecessor.
11829 pred_iterator PI = pred_begin(DestBB);
11830 BasicBlock *OtherBB = 0;
11831   if (*PI != StoreBB)
11832     OtherBB = *PI;
11833   ++PI;
11834   if (PI == pred_end(DestBB))
11835     return false;

11837   if (*PI != StoreBB) {
11838     if (OtherBB)
11839       return false;
11840     OtherBB = *PI;
11841   }
11842   if (++PI != pred_end(DestBB))
11843     return false;
11845 // Bail out if all the relevant blocks aren't distinct (this can happen,
11846 // for example, if SI is in an infinite loop)
11847   if (StoreBB == DestBB || OtherBB == DestBB)
11848     return false;
11850 // Verify that the other block ends in a branch and is not otherwise empty.
11851 BasicBlock::iterator BBI = OtherBB->getTerminator();
11852 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
11853   if (!OtherBr || BBI == OtherBB->begin())
11854     return false;
11856 // If the other block ends in an unconditional branch, check for the 'if then
11857   // else' case: there is an instruction before the branch.
11858 StoreInst *OtherStore = 0;
11859   if (OtherBr->isUnconditional()) {
11860     --BBI;
11861     // Skip over debugging info.
11862     while (isa<DbgInfoIntrinsic>(BBI) ||
11863            (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
11864       if (BBI==OtherBB->begin())
11865         return false;
11866       --BBI;
11867     }
11868 // If this isn't a store, or isn't a store to the same location, bail out.
11869 OtherStore = dyn_cast<StoreInst>(BBI);
11870     if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1))
11871       return false;
11872   } else {
11873 // Otherwise, the other block ended with a conditional branch. If one of the
11874 // destinations is StoreBB, then we have the if/then case.
11875 if (OtherBr->getSuccessor(0) != StoreBB &&
11876         OtherBr->getSuccessor(1) != StoreBB)
11877       return false;
11879 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
11880 // if/then triangle. See if there is a store to the same ptr as SI that
11881     // lives in OtherBB.
11882     for (;; --BBI) {
11883       // Check to see if we find the matching store.
11884       if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
11885         if (OtherStore->getOperand(1) != SI.getOperand(1))
11886           return false;
11887         break;
11888       }
11889 // If we find something that may be using or overwriting the stored
11890 // value, or if we run out of instructions, we can't do the xform.
11891 if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
11892 BBI == OtherBB->begin())
11896 // In order to eliminate the store in OtherBr, we have to
11897     // make sure nothing reads or overwrites the stored value in
11898     // StoreBB.
11899 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
11900 // FIXME: This should really be AA driven.
11901 if (I->mayReadFromMemory() || I->mayWriteToMemory())
11906 // Insert a PHI node now if we need it.
11907 Value *MergedVal = OtherStore->getOperand(0);
11908 if (MergedVal != SI.getOperand(0)) {
11909 PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
11910 PN->reserveOperandSpace(2);
11911 PN->addIncoming(SI.getOperand(0), SI.getParent());
11912 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
11913 MergedVal = InsertNewInstBefore(PN, DestBB->front());
11916 // Advance to a place where it is safe to insert the new store and
11918 BBI = DestBB->getFirstNonPHI();
11919 InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
11920 OtherStore->isVolatile()), *BBI);
11922 // Nuke the old stores.
11923 EraseInstFromFunction(SI);
11924 EraseInstFromFunction(*OtherStore);
11930 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
11931 // Change br (not X), label True, label False to: br X, label False, True
11932   Value *X = 0;
11933   BasicBlock *TrueDest;
11934 BasicBlock *FalseDest;
11935 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
11936 !isa<Constant>(X)) {
11937 // Swap Destinations and condition...
11938 BI.setCondition(X);
11939 BI.setSuccessor(0, FalseDest);
11940     BI.setSuccessor(1, TrueDest);
11941     return &BI;
11942   }
11944   // Canonicalize fcmp_one -> fcmp_oeq
11945 FCmpInst::Predicate FPred; Value *Y;
11946 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
11947 TrueDest, FalseDest)) &&
11948 BI.getCondition()->hasOneUse())
11949 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
11950 FPred == FCmpInst::FCMP_OGE) {
11951 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
11952 Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
11954 // Swap Destinations and condition.
11955 BI.setSuccessor(0, FalseDest);
11956 BI.setSuccessor(1, TrueDest);
11957       Worklist.Add(Cond);
11958       return &BI;
11959     }
11961   // Canonicalize icmp_ne -> icmp_eq
11962 ICmpInst::Predicate IPred;
11963 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
11964 TrueDest, FalseDest)) &&
11965 BI.getCondition()->hasOneUse())
11966 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
11967 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
11968 IPred == ICmpInst::ICMP_SGE) {
11969 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
11970 Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
11971 // Swap Destinations and condition.
11972 BI.setSuccessor(0, FalseDest);
11973 BI.setSuccessor(1, TrueDest);
11974 Worklist.Add(Cond);
11981 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
11982 Value *Cond = SI.getCondition();
11983 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
11984 if (I->getOpcode() == Instruction::Add)
11985 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
11986 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
11987         for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
11988           SI.setOperand(i,
11989                         ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
11990                                              AddRHS));
11991         SI.setOperand(0, I->getOperand(0));
11992         Worklist.Add(I);
11993         return &SI;
11994       }
11995   }
11996   return 0;
11997 }
11999 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
12000 Value *Agg = EV.getAggregateOperand();
12002 if (!EV.hasIndices())
12003 return ReplaceInstUsesWith(EV, Agg);
12005 if (Constant *C = dyn_cast<Constant>(Agg)) {
12006 if (isa<UndefValue>(C))
12007 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
12009 if (isa<ConstantAggregateZero>(C))
12010 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
12012 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
12013 // Extract the element indexed by the first index out of the constant
12014 Value *V = C->getOperand(*EV.idx_begin());
12015 if (EV.getNumIndices() > 1)
12016         // Extract the remaining indices out of the constant indexed by the
12017         // first index
12018         return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
12019       else
12020         return ReplaceInstUsesWith(EV, V);
12021     }
12022     return 0; // Can't handle other constants
12023   }
12024 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
12025 // We're extracting from an insertvalue instruction, compare the indices
12026 const unsigned *exti, *exte, *insi, *inse;
12027 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
12028 exte = EV.idx_end(), inse = IV->idx_end();
12029 exti != exte && insi != inse;
12030        ++exti, ++insi) {
12031     if (*insi != *exti)
12032 // The insert and extract both reference distinctly different elements.
12033 // This means the extract is not influenced by the insert, and we can
12034 // replace the aggregate operand of the extract with the aggregate
12035 // operand of the insert. i.e., replace
12036 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
12037 // %E = extractvalue { i32, { i32 } } %I, 0
12039 // %E = extractvalue { i32, { i32 } } %A, 0
12040 return ExtractValueInst::Create(IV->getAggregateOperand(),
12041 EV.idx_begin(), EV.idx_end());
12043 if (exti == exte && insi == inse)
12044 // Both iterators are at the end: Index lists are identical. Replace
12045 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
12046 // %C = extractvalue { i32, { i32 } } %B, 1, 0
12048 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
12049 if (exti == exte) {
12050 // The extract list is a prefix of the insert list. i.e. replace
12051 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
12052 // %E = extractvalue { i32, { i32 } } %I, 1
12054 // %X = extractvalue { i32, { i32 } } %A, 1
12055 // %E = insertvalue { i32 } %X, i32 42, 0
12056 // by switching the order of the insert and extract (though the
12057 // insertvalue should be left in, since it may have other uses).
12058 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
12059 EV.idx_begin(), EV.idx_end());
12060 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
12064 // The insert list is a prefix of the extract list
12065 // We can simply remove the common indices from the extract and make it
12066 // operate on the inserted value instead of the insertvalue result.
12068 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
12069 // %E = extractvalue { i32, { i32 } } %I, 1, 0
12071 // %E extractvalue { i32 } { i32 42 }, 0
12072 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
12075 // Can't simplify extracts from other values. Note that nested extracts are
12076   // already simplified implicitly by the above (extract ( extract (insert) )
12077 // will be translated into extract ( insert ( extract ) ) first and then just
12078 // the value inserted, if appropriate).
12082 /// CheapToScalarize - Return true if the value is cheaper to scalarize than it
12083 /// is to leave as a vector operation.
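/// For example (illustrative), a single-use vector add whose only consumer is
/// an extractelement at a constant index is usually better done as a scalar
/// add of the two extracted operands.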
12084 static bool CheapToScalarize(Value *V, bool isConstant) {
12085   if (isa<ConstantAggregateZero>(V))
12086     return true;
12087 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
12088 if (isConstant) return true;
12089 // If all elts are the same, we can extract.
12090 Constant *Op0 = C->getOperand(0);
12091 for (unsigned i = 1; i < C->getNumOperands(); ++i)
12092       if (C->getOperand(i) != Op0)
12093         return false;
12094     return true;
12095   }
12096 Instruction *I = dyn_cast<Instruction>(V);
12097 if (!I) return false;
12099 // Insert element gets simplified to the inserted element or is deleted if
12100   // this is constant idx extract element and it's a constant idx insertelt.
12101 if (I->getOpcode() == Instruction::InsertElement && isConstant &&
12102       isa<ConstantInt>(I->getOperand(2)))
12103     return true;
12104   if (I->getOpcode() == Instruction::Load && I->hasOneUse())
12105     return true;
12106 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
12107 if (BO->hasOneUse() &&
12108 (CheapToScalarize(BO->getOperand(0), isConstant) ||
12109 CheapToScalarize(BO->getOperand(1), isConstant)))
12111 if (CmpInst *CI = dyn_cast<CmpInst>(I))
12112 if (CI->hasOneUse() &&
12113 (CheapToScalarize(CI->getOperand(0), isConstant) ||
12114 CheapToScalarize(CI->getOperand(1), isConstant)))
12120 /// Read and decode a shufflevector mask.
12122 /// It turns undef elements into values that are larger than the number of
12123 /// elements in the input.
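/// For example (illustrative), for a 4-element shuffle the mask
/// <i32 0, i32 5, i32 undef, i32 3> decodes to {0, 5, 8, 3}, where 8
/// (== 2*NElts) marks the undef lane.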
12124 static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
12125 unsigned NElts = SVI->getType()->getNumElements();
12126 if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
12127 return std::vector<unsigned>(NElts, 0);
12128 if (isa<UndefValue>(SVI->getOperand(2)))
12129 return std::vector<unsigned>(NElts, 2*NElts);
12131 std::vector<unsigned> Result;
12132 const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
12133 for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
12134 if (isa<UndefValue>(*i))
12135 Result.push_back(NElts*2); // undef -> 8
12137 Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
12141 /// FindScalarElement - Given a vector and an element number, see if the scalar
12142 /// value is already around as a register, for example if it were inserted then
12143 /// extracted from the vector.
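/// For example (illustrative), asking for element 2 of
///   %V1 = insertelement <4 x float> %V0, float %f, i32 2
/// yields %f directly, with no extractelement instruction needed.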
12144 static Value *FindScalarElement(Value *V, unsigned EltNo,
12145 LLVMContext *Context) {
12146 assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
12147 const VectorType *PTy = cast<VectorType>(V->getType());
12148 unsigned Width = PTy->getNumElements();
12149 if (EltNo >= Width) // Out of range access.
12150 return UndefValue::get(PTy->getElementType());
12152 if (isa<UndefValue>(V))
12153 return UndefValue::get(PTy->getElementType());
12154 else if (isa<ConstantAggregateZero>(V))
12155 return Constant::getNullValue(PTy->getElementType());
12156 else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
12157 return CP->getOperand(EltNo);
12158 else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
12159 // If this is an insert to a variable element, we don't know what it is.
12160     if (!isa<ConstantInt>(III->getOperand(2)))
12161       return 0;
12162 unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
12164     // If this is an insert to the element we are looking for, return the
12165     // inserted value.
12166 if (EltNo == IIElt)
12167 return III->getOperand(1);
12169     // Otherwise, the insertelement doesn't modify the value, recurse on its
12170     // vector input.
12171 return FindScalarElement(III->getOperand(0), EltNo, Context);
12172 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
12173 unsigned LHSWidth =
12174 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
12175 unsigned InEl = getShuffleMask(SVI)[EltNo];
12176 if (InEl < LHSWidth)
12177 return FindScalarElement(SVI->getOperand(0), InEl, Context);
12178 else if (InEl < LHSWidth*2)
12179 return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth, Context);
12181 return UndefValue::get(PTy->getElementType());
12184 // Otherwise, we don't know.
12188 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
12189 // If vector val is undef, replace extract with scalar undef.
12190 if (isa<UndefValue>(EI.getOperand(0)))
12191 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
12193 // If vector val is constant 0, replace extract with scalar 0.
12194 if (isa<ConstantAggregateZero>(EI.getOperand(0)))
12195 return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
12197 if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
12198 // If vector val is constant with all elements the same, replace EI with
12199 // that element. When the elements are not identical, we cannot replace yet
12200 // (we do that below, but only when the index is constant).
12201 Constant *op0 = C->getOperand(0);
12202 for (unsigned i = 1; i != C->getNumOperands(); ++i)
12203       if (C->getOperand(i) != op0) {
12204         op0 = 0;
12205         break;
12206       }
12207     if (op0)
12208       return ReplaceInstUsesWith(EI, op0);
12209   }
12211 // If extracting a specified index from the vector, see if we can recursively
12212 // find a previously computed scalar that was inserted into the vector.
12213 if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
12214 unsigned IndexVal = IdxC->getZExtValue();
12215 unsigned VectorWidth = EI.getVectorOperandType()->getNumElements();
12217 // If this is extracting an invalid index, turn this into undef, to avoid
12218 // crashing the code below.
12219 if (IndexVal >= VectorWidth)
12220 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
12222 // This instruction only demands the single element from the input vector.
12223 // If the input vector has a single use, simplify it based on this use
12225 if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
12226 APInt UndefElts(VectorWidth, 0);
12227 APInt DemandedMask(VectorWidth, 1 << IndexVal);
12228 if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
12229 DemandedMask, UndefElts)) {
12230 EI.setOperand(0, V);
12235 if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal, Context))
12236 return ReplaceInstUsesWith(EI, Elt);
12238     // If this extractelement is directly using a bitcast from a vector of
12239 // the same number of elements, see if we can find the source element from
12240 // it. In this case, we will end up needing to bitcast the scalars.
12241 if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
12242 if (const VectorType *VT =
12243 dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
12244 if (VT->getNumElements() == VectorWidth)
12245 if (Value *Elt = FindScalarElement(BCI->getOperand(0),
12246 IndexVal, Context))
12247 return new BitCastInst(Elt, EI.getType());
12251 if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
12252 // Push extractelement into predecessor operation if legal and
12253 // profitable to do so
12254 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
12255 if (I->hasOneUse() &&
12256 CheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
12257         Value *newEI0 =
12258           Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
12259                                         EI.getName()+".lhs");
12260         Value *newEI1 =
12261           Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
12262                                         EI.getName()+".rhs");
12263         return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
12264       }
12265 } else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
12266 // Extracting the inserted element?
12267 if (IE->getOperand(2) == EI.getOperand(1))
12268 return ReplaceInstUsesWith(EI, IE->getOperand(1));
12269 // If the inserted and extracted elements are constants, they must not
12270 // be the same value, extract from the pre-inserted value instead.
12271 if (isa<Constant>(IE->getOperand(2)) && isa<Constant>(EI.getOperand(1))) {
12272 Worklist.AddValue(EI.getOperand(0));
12273 EI.setOperand(0, IE->getOperand(0));
12276 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
12277 // If this is extracting an element from a shufflevector, figure out where
12278 // it came from and extract from the appropriate input element instead.
12279 if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
12280         unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
12281         Value *Src;
12282 unsigned LHSWidth =
12283 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
12285 if (SrcIdx < LHSWidth)
12286 Src = SVI->getOperand(0);
12287 else if (SrcIdx < LHSWidth*2) {
12288 SrcIdx -= LHSWidth;
12289 Src = SVI->getOperand(1);
12290         } else {
12291           return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
12292         }
12293 return ExtractElementInst::Create(Src,
12294 ConstantInt::get(Type::getInt32Ty(*Context), SrcIdx,
12298 // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
12303 /// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
12304 /// elements from either LHS or RHS, return the shuffle mask and true.
12305 /// Otherwise, return false.
12306 static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
12307 std::vector<Constant*> &Mask,
12308 LLVMContext *Context) {
12309 assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
12310 "Invalid CollectSingleShuffleElements");
12311 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
12313 if (isa<UndefValue>(V)) {
12314     Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
12315     return true;
12316 } else if (V == LHS) {
12317 for (unsigned i = 0; i != NumElts; ++i)
12318       Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
12319     return true;
12320 } else if (V == RHS) {
12321 for (unsigned i = 0; i != NumElts; ++i)
12322       Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i+NumElts));
12323     return true;
12324 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12325 // If this is an insert of an extract from some other vector, include it.
12326 Value *VecOp = IEI->getOperand(0);
12327 Value *ScalarOp = IEI->getOperand(1);
12328 Value *IdxOp = IEI->getOperand(2);
12330     if (!isa<ConstantInt>(IdxOp))
12331       return false;
12332 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12334 if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
12335       // Okay, we can handle this if the vector we are inserting into is
12336 // transitively ok.
12337 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
12338 // If so, update the mask to reflect the inserted undef.
12339         Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(*Context));
12340         return true;
12341       }
12342 } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
12343 if (isa<ConstantInt>(EI->getOperand(1)) &&
12344 EI->getOperand(0)->getType() == V->getType()) {
12345 unsigned ExtractedIdx =
12346 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12348 // This must be extracting from either LHS or RHS.
12349 if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
12350           // Okay, we can handle this if the vector we are inserting into is
12351 // transitively ok.
12352 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
12353 // If so, update the mask to reflect the inserted value.
12354 if (EI->getOperand(0) == LHS) {
12355 Mask[InsertedIdx % NumElts] =
12356 ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
12358 assert(EI->getOperand(0) == RHS);
12359 Mask[InsertedIdx % NumElts] =
12360 ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx+NumElts);
12369 // TODO: Handle shufflevector here!
12374 /// CollectShuffleElements - We are building a shuffle of V, using RHS as the
12375 /// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
12376 /// that computes V and the LHS value of the shuffle.
12377 static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
12378 Value *&RHS, LLVMContext *Context) {
12379 assert(isa<VectorType>(V->getType()) &&
12380 (RHS == 0 || V->getType() == RHS->getType()) &&
12381 "Invalid shuffle!");
12382 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
12384 if (isa<UndefValue>(V)) {
12385 Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
12387 } else if (isa<ConstantAggregateZero>(V)) {
12388 Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(*Context), 0));
12390 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12391 // If this is an insert of an extract from some other vector, include it.
12392 Value *VecOp = IEI->getOperand(0);
12393 Value *ScalarOp = IEI->getOperand(1);
12394 Value *IdxOp = IEI->getOperand(2);
12396 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12397 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12398 EI->getOperand(0)->getType() == V->getType()) {
12399 unsigned ExtractedIdx =
12400 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12401 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12403 // Either the extracted from or inserted into vector must be RHSVec,
12404 // otherwise we'd end up with a shuffle of three inputs.
12405 if (EI->getOperand(0) == RHS || RHS == 0) {
12406 RHS = EI->getOperand(0);
12407 Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
12408 Mask[InsertedIdx % NumElts] =
12409 ConstantInt::get(Type::getInt32Ty(*Context), NumElts+ExtractedIdx);
12413 if (VecOp == RHS) {
12414 Value *V = CollectShuffleElements(EI->getOperand(0), Mask,
12416 // Everything but the extracted element is replaced with the RHS.
12417 for (unsigned i = 0; i != NumElts; ++i) {
12418 if (i != InsertedIdx)
12419 Mask[i] = ConstantInt::get(Type::getInt32Ty(*Context), NumElts+i);
12424 // If this insertelement is a chain that comes from exactly these two
12425 // vectors, return the vector and the effective shuffle.
12426 if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask,
12428 return EI->getOperand(0);
12433 // TODO: Handle shufflevector here!
12435 // Otherwise, can't do anything fancy. Return an identity vector.
12436 for (unsigned i = 0; i != NumElts; ++i)
12437 Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
12441 Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
12442 Value *VecOp = IE.getOperand(0);
12443 Value *ScalarOp = IE.getOperand(1);
12444 Value *IdxOp = IE.getOperand(2);
12446 // Inserting an undef or into an undefined place, remove this.
12447 if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
12448 ReplaceInstUsesWith(IE, VecOp);
12450 // If the inserted element was extracted from some other vector, and if the
12451 // indexes are constant, try to turn this into a shufflevector operation.
12452 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12453 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12454 EI->getOperand(0)->getType() == IE.getType()) {
12455 unsigned NumVectorElts = IE.getType()->getNumElements();
12456 unsigned ExtractedIdx =
12457 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12458 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12460 if (ExtractedIdx >= NumVectorElts) // Out of range extract.
12461 return ReplaceInstUsesWith(IE, VecOp);
12463 if (InsertedIdx >= NumVectorElts) // Out of range insert.
12464 return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
12466 // If we are extracting a value from a vector, then inserting it right
12467 // back into the same place, just use the input vector.
12468 if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
12469 return ReplaceInstUsesWith(IE, VecOp);
12471 // We could theoretically do this for ANY input. However, doing so could
12472 // turn chains of insertelement instructions into a chain of shufflevector
12473 // instructions, and right now we do not merge shufflevectors. As such,
12474 // only do this in a situation where it is clear that there is benefit.
12475 if (isa<UndefValue>(VecOp) || isa<ConstantAggregateZero>(VecOp)) {
12476 // Turn this into shuffle(EIOp0, VecOp, Mask). The result has all of
12477         // the values of VecOp, except the one read from EIOp0.
12478 // Build a new shuffle mask.
12479 std::vector<Constant*> Mask;
12480 if (isa<UndefValue>(VecOp))
12481 Mask.assign(NumVectorElts, UndefValue::get(Type::getInt32Ty(*Context)));
12483 assert(isa<ConstantAggregateZero>(VecOp) && "Unknown thing");
12484 Mask.assign(NumVectorElts, ConstantInt::get(Type::getInt32Ty(*Context),
12487 Mask[InsertedIdx] =
12488 ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
12489 return new ShuffleVectorInst(EI->getOperand(0), VecOp,
12490 ConstantVector::get(Mask));
12493 // If this insertelement isn't used by some other insertelement, turn it
12494 // (and any insertelements it points to), into one big shuffle.
12495 if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
12496 std::vector<Constant*> Mask;
12497       Value *RHS = 0;
12498       Value *LHS = CollectShuffleElements(&IE, Mask, RHS, Context);
12499 if (RHS == 0) RHS = UndefValue::get(LHS->getType());
12500 // We now have a shuffle of LHS, RHS, Mask.
12501 return new ShuffleVectorInst(LHS, RHS,
12502 ConstantVector::get(Mask));
12507 unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
12508 APInt UndefElts(VWidth, 0);
12509 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12510 if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
12517 Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
12518 Value *LHS = SVI.getOperand(0);
12519 Value *RHS = SVI.getOperand(1);
12520 std::vector<unsigned> Mask = getShuffleMask(&SVI);
12522 bool MadeChange = false;
12524 // Undefined shuffle mask -> undefined value.
12525 if (isa<UndefValue>(SVI.getOperand(2)))
12526 return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
12528 unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
12530   if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
12531     return 0;
12533 APInt UndefElts(VWidth, 0);
12534 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12535 if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
12536 LHS = SVI.getOperand(0);
12537 RHS = SVI.getOperand(1);
12541 // Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask')
12542 // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
12543 if (LHS == RHS || isa<UndefValue>(LHS)) {
12544 if (isa<UndefValue>(LHS) && LHS == RHS) {
12545 // shuffle(undef,undef,mask) -> undef.
12546 return ReplaceInstUsesWith(SVI, LHS);
12549 // Remap any references to RHS to use LHS.
12550 std::vector<Constant*> Elts;
12551 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
12552 if (Mask[i] >= 2*e)
12553         Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
12554       else {
12555         if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
12556             (Mask[i] <  e && isa<UndefValue>(LHS))) {
12557           Mask[i] = 2*e;     // Turn into undef.
12558           Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
12559         } else {
12560           Mask[i] = Mask[i] % e;  // Force to LHS.
12561           Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Mask[i]));
12562         }
12563       }
12564     }
12565 SVI.setOperand(0, SVI.getOperand(1));
12566 SVI.setOperand(1, UndefValue::get(RHS->getType()));
12567 SVI.setOperand(2, ConstantVector::get(Elts));
12568 LHS = SVI.getOperand(0);
12569 RHS = SVI.getOperand(1);
  // Analyze the shuffle: is the LHS or RHS an identity shuffle?
  bool isLHSID = true, isRHSID = true;

  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] >= e*2) continue;  // Ignore undef values.
    // Is this an identity shuffle of the LHS value?
    isLHSID &= (Mask[i] == i);

    // Is this an identity shuffle of the RHS value?
    isRHSID &= (Mask[i]-e == i);
  }

  // Eliminate identity shuffles.
  if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
  if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);

  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle. Here we are really conservative:
  // we are absolutely afraid of producing a shuffle mask not in the input
  // program, because the code generator may not be smart enough to turn a
  // merged shuffle into two specific shuffles: it may produce worse code. As
  // such, we only merge two shuffles if the result is one of the two input
  // shuffle masks. In this case, merging the shuffles just removes one
  // instruction, which we know is safe. This is good for things like turning:
  // (splat(splat)) -> splat.
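  // Illustrative example (added comment, not from the original source):
  //   %s1 = shufflevector <4 x i32> %v,  <4 x i32> undef, <4 x i32> zeroinitializer
  //   %s2 = shufflevector <4 x i32> %s1, <4 x i32> undef, <4 x i32> zeroinitializer
  // The merged mask equals both input masks, so %s2 can be rewritten as a
  // single splat of %v.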
  if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
    if (isa<UndefValue>(RHS)) {
      std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);

      std::vector<unsigned> NewMask;
      for (unsigned i = 0, e = Mask.size(); i != e; ++i)
        if (Mask[i] >= 2*e)
          NewMask.push_back(2*e);
        else
          NewMask.push_back(LHSMask[Mask[i]]);

      // If the result mask is equal to the src shuffle or this shuffle mask, do
      // the replacement.
      if (NewMask == LHSMask || NewMask == Mask) {
        unsigned LHSInNElts =
          cast<VectorType>(LHSSVI->getOperand(0)->getType())->getNumElements();
        std::vector<Constant*> Elts;
        for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
          if (NewMask[i] >= LHSInNElts*2) {
            Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
          } else {
            Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
                                            NewMask[i]));
          }
        }
        return new ShuffleVectorInst(LHSSVI->getOperand(0),
                                     LHSSVI->getOperand(1),
                                     ConstantVector::get(Elts));
      }
    }
  }

  return MadeChange ? &SVI : 0;
}

/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
/// safe to move the instruction past all of the instructions between it and the
/// end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving instructions, volatile loads, vaarg,
  // etc.
  if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load and
  // the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();

  CopyPrecedingStopPoint(I, InsertPos);
  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}

/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful. In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant). Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
///
static void AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  while (!Worklist.empty()) {
    BB = Worklist.back();
    Worklist.pop_back();

    // We have now visited this block! If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    DbgInfoIntrinsic *DBI_Prev = NULL;
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // ConstantProp instruction if trivially constant.
      if (Constant *C = ConstantFoldInstruction(Inst, BB->getContext(), TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                     << *Inst << '\n');
        Inst->replaceAllUsesWith(C);
        ++NumConstProp;
        Inst->eraseFromParent();
        continue;
      }

      // If there are two consecutive llvm.dbg.stoppoint calls then
      // it is likely that the optimizer deleted code in between these
      // two intrinsics.
      DbgInfoIntrinsic *DBI_Next = dyn_cast<DbgInfoIntrinsic>(Inst);
      if (DBI_Next) {
        if (DBI_Prev
            && DBI_Prev->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint
            && DBI_Next->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint) {
          IC.Worklist.Remove(DBI_Prev);
          DBI_Prev->eraseFromParent();
        }
        DBI_Prev = DBI_Next;
      } else {
        DBI_Prev = 0;
      }

      IC.Worklist.Add(Inst);
    }

    // Recursively visit successors. If this is a branch or switch on a
    // constant, only visit the reachable successor.
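    // Illustrative example (added comment, not from the original source): for
    //   br i1 false, label %then, label %else
    // only %else is pushed onto the block worklist; code reachable solely
    // through %then is never added and is later removed as unreachable.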
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination.
        bool FoundCase = false;
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            BasicBlock *ReachableBB = SI->getSuccessor(i);
            Worklist.push_back(ReachableBB);
            FoundCase = true;
            break;
          }

        // Otherwise it is the default destination.
        if (!FoundCase)
          Worklist.push_back(SI->getSuccessor(0));
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  }
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;
  TD = getAnalysisIfAvailable<TargetData>();

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getNameStr() << "\n");

  // Do a depth-first traversal of the function, populate the worklist with
  // the reachable instructions. Ignore blocks that are not reachable. Keep
  // track of which blocks we visit.
  SmallPtrSet<BasicBlock*, 64> Visited;
  AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

  // Do a quick scan over the function. If we find any blocks that are
  // unreachable, remove any instructions inside of them. This prevents
  // the instcombine code from having to deal with some bad special cases.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (!Visited.count(BB)) {
      Instruction *Term = BB->getTerminator();
      while (Term != BB->begin()) {   // Remove instrs bottom-up
        BasicBlock::iterator I = Term; --I;

        DEBUG(errs() << "IC: DCE: " << *I << '\n');
        // A debug intrinsic shouldn't force another iteration if we weren't
        // going to do one without it.
        if (!isa<DbgInfoIntrinsic>(I)) {
          ++NumDeadInst;
          MadeIRChange = true;
        }
        if (!I->use_empty())
          I->replaceAllUsesWith(UndefValue::get(I->getType()));
        I->eraseFromParent();
      }
    }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (Constant *C = ConstantFoldInstruction(I, F.getContext(), TD)) {
      DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

      // Add operands to the worklist.
      ReplaceInstUsesWith(*I, C);
      ++NumConstProp;
      EraseInstFromFunction(*I);
      MadeIRChange = true;
      continue;
    }

    if (TD) {
      // See if we can constant fold its operands.
      for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(i))
          if (Constant *NewC = ConstantFoldConstantExpression(CE,
                                                              F.getContext(), TD))
            if (NewC != CE) {
              i->set(NewC);
              MadeIRChange = true;
            }
    }

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      BasicBlock *UserParent = cast<Instruction>(I->use_back())->getParent();
      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
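        // Illustrative example (added comment, not from the original source):
        // an add computed in %entry whose only use is in a successor %next,
        // where %next has %entry as its lone predecessor, can be sunk to the
        // top of %next, shortening its live range.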
        if (UserIsSuccessor && !isa<PHINode>(I->use_back()) &&
            next(pred_begin(UserParent)) == pred_end(UserParent))
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
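    // Note (added comment, not from the original source): setting the insertion
    // point here means any instructions the visit* handlers create via Builder
    // are placed immediately before I in its block.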
    Builder->SetInsertPoint(I->getParent(), I);

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert in
          while (isa<PHINode>(InsertPos)) // the middle of a block of PHIs.
            ++InsertPos;

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
#ifndef NDEBUG
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}

bool InstCombiner::runOnFunction(Function &F) {
  MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
  Context = &F.getContext();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
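  // For example (added comment, not from the original source), a handler that
  // calls
  //   Value *V = Builder->CreateAdd(X, Y);
  // gets the new add both inserted at the current insertion point and pushed
  // onto the worklist by InstCombineIRInserter (unless the ConstantFolder
  // folds it to a constant first).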
  IRBuilder<true, ConstantFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), ConstantFolder(F.getContext()),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}