//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Intrinsics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
22 /// AddOne - Add one to a ConstantInt.
23 static Constant *AddOne(Constant *C) {
24 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
26 /// SubOne - Subtract one from a ConstantInt.
27 static Constant *SubOne(ConstantInt *C) {
28 return ConstantInt::get(C->getContext(), C->getValue()-1);
31 /// isFreeToInvert - Return true if the specified value is free to invert (apply
32 /// ~ to). This happens in cases where the ~ can be eliminated.
33 static inline bool isFreeToInvert(Value *V) {
35 if (BinaryOperator::isNot(V))
38 // Constants can be considered to be not'ed values.
39 if (isa<ConstantInt>(V))
42 // Compares can be inverted if they have a single use.
43 if (CmpInst *CI = dyn_cast<CmpInst>(V))
44 return CI->hasOneUse();
49 static inline Value *dyn_castNotVal(Value *V) {
50 // If this is not(not(x)) don't return that this is a not: we want the two
51 // not's to be folded first.
52 if (BinaryOperator::isNot(V)) {
53 Value *Operand = BinaryOperator::getNotArgument(V);
54 if (!isFreeToInvert(Operand))
58 // Constants can be considered to be not'ed values...
59 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
60 return ConstantInt::get(C->getType(), ~C->getValue());
65 /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
66 /// are carefully arranged to allow folding of expressions such as:
68 /// (A < B) | (A > B) --> (A != B)
70 /// Note that this is only valid if the first and second predicates have the
71 /// same sign. Is illegal to do: (A u< B) | (A s> B)
73 /// Three bits are used to represent the condition, as follows:
78 /// <=> Value Definition
79 /// 000 0 Always false
88 static unsigned getICmpCode(const ICmpInst *ICI) {
89 switch (ICI->getPredicate()) {
91 case ICmpInst::ICMP_UGT: return 1; // 001
92 case ICmpInst::ICMP_SGT: return 1; // 001
93 case ICmpInst::ICMP_EQ: return 2; // 010
94 case ICmpInst::ICMP_UGE: return 3; // 011
95 case ICmpInst::ICMP_SGE: return 3; // 011
96 case ICmpInst::ICMP_ULT: return 4; // 100
97 case ICmpInst::ICMP_SLT: return 4; // 100
98 case ICmpInst::ICMP_NE: return 5; // 101
99 case ICmpInst::ICMP_ULE: return 6; // 110
100 case ICmpInst::ICMP_SLE: return 6; // 110
103 llvm_unreachable("Invalid ICmp predicate!");
108 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
109 /// predicate into a three bit mask. It also returns whether it is an ordered
110 /// predicate by reference.
111 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
114 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
115 case FCmpInst::FCMP_UNO: return 0; // 000
116 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
117 case FCmpInst::FCMP_UGT: return 1; // 001
118 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
119 case FCmpInst::FCMP_UEQ: return 2; // 010
120 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
121 case FCmpInst::FCMP_UGE: return 3; // 011
122 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
123 case FCmpInst::FCMP_ULT: return 4; // 100
124 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
125 case FCmpInst::FCMP_UNE: return 5; // 101
126 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
127 case FCmpInst::FCMP_ULE: return 6; // 110
130 // Not expecting FCMP_FALSE and FCMP_TRUE;
131 llvm_unreachable("Unexpected FCmp predicate!");
136 /// getICmpValue - This is the complement of getICmpCode, which turns an
137 /// opcode and two operands into either a constant true or false, or a brand
138 /// new ICmp instruction. The sign is passed in to determine which kind
139 /// of predicate to use in the new icmp instruction.
140 static Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
141 InstCombiner::BuilderTy *Builder) {
142 CmpInst::Predicate Pred;
144 default: assert(0 && "Illegal ICmp code!");
146 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
147 case 1: Pred = Sign ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
148 case 2: Pred = ICmpInst::ICMP_EQ; break;
149 case 3: Pred = Sign ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
150 case 4: Pred = Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
151 case 5: Pred = ICmpInst::ICMP_NE; break;
152 case 6: Pred = Sign ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
154 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
156 return Builder->CreateICmp(Pred, LHS, RHS);
159 /// getFCmpValue - This is the complement of getFCmpCode, which turns an
160 /// opcode and two operands into either a FCmp instruction. isordered is passed
161 /// in to determine which kind of predicate to use in the new fcmp instruction.
162 static Value *getFCmpValue(bool isordered, unsigned code,
163 Value *LHS, Value *RHS,
164 InstCombiner::BuilderTy *Builder) {
165 CmpInst::Predicate Pred;
167 default: assert(0 && "Illegal FCmp code!");
168 case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
169 case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
170 case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
171 case 3: Pred = isordered ? FCmpInst::FCMP_OGE : FCmpInst::FCMP_UGE; break;
172 case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
173 case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
174 case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
175 case 7: return ConstantInt::getTrue(LHS->getContext());
177 return Builder->CreateFCmp(Pred, LHS, RHS);
180 /// PredicatesFoldable - Return true if both predicates match sign or if at
181 /// least one of them is an equality comparison (which is signless).
182 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
183 return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
184 (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
185 (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
188 // OptAndOp - This handles expressions of the form ((val OP C1) & C2). Where
189 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
190 // guaranteed to be a binary operator.
191 Instruction *InstCombiner::OptAndOp(Instruction *Op,
194 BinaryOperator &TheAnd) {
195 Value *X = Op->getOperand(0);
196 Constant *Together = 0;
198 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
200 switch (Op->getOpcode()) {
201 case Instruction::Xor:
202 if (Op->hasOneUse()) {
203 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
204 Value *And = Builder->CreateAnd(X, AndRHS);
206 return BinaryOperator::CreateXor(And, Together);
209 case Instruction::Or:
210 if (Together == AndRHS) // (X | C) & C --> C
211 return ReplaceInstUsesWith(TheAnd, AndRHS);
213 if (Op->hasOneUse() && Together != OpRHS) {
214 // (X | C1) & C2 --> (X | (C1&C2)) & C2
215 Value *Or = Builder->CreateOr(X, Together);
217 return BinaryOperator::CreateAnd(Or, AndRHS);
220 case Instruction::Add:
221 if (Op->hasOneUse()) {
222 // Adding a one to a single bit bit-field should be turned into an XOR
223 // of the bit. First thing to check is to see if this AND is with a
224 // single bit constant.
225 const APInt &AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
227 // If there is only one bit set.
228 if (AndRHSV.isPowerOf2()) {
229 // Ok, at this point, we know that we are masking the result of the
230 // ADD down to exactly one bit. If the constant we are adding has
231 // no bits set below this bit, then we can eliminate the ADD.
232 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
234 // Check to see if any bits below the one bit set in AndRHSV are set.
235 if ((AddRHS & (AndRHSV-1)) == 0) {
236 // If not, the only thing that can effect the output of the AND is
237 // the bit specified by AndRHSV. If that bit is set, the effect of
238 // the XOR is to toggle the bit. If it is clear, then the ADD has
240 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
241 TheAnd.setOperand(0, X);
244 // Pull the XOR out of the AND.
245 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
246 NewAnd->takeName(Op);
247 return BinaryOperator::CreateXor(NewAnd, AndRHS);
254 case Instruction::Shl: {
255 // We know that the AND will not produce any of the bits shifted in, so if
256 // the anded constant includes them, clear them now!
258 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
259 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
260 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
261 ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
262 AndRHS->getValue() & ShlMask);
264 if (CI->getValue() == ShlMask) {
265 // Masking out bits that the shift already masks
266 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
267 } else if (CI != AndRHS) { // Reducing bits set in and.
268 TheAnd.setOperand(1, CI);
273 case Instruction::LShr: {
274 // We know that the AND will not produce any of the bits shifted in, so if
275 // the anded constant includes them, clear them now! This only applies to
276 // unsigned shifts, because a signed shr may bring in set bits!
278 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
279 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
280 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
281 ConstantInt *CI = ConstantInt::get(Op->getContext(),
282 AndRHS->getValue() & ShrMask);
284 if (CI->getValue() == ShrMask) {
285 // Masking out bits that the shift already masks.
286 return ReplaceInstUsesWith(TheAnd, Op);
287 } else if (CI != AndRHS) {
288 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
293 case Instruction::AShr:
295 // See if this is shifting in some sign extension, then masking it out
297 if (Op->hasOneUse()) {
298 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
299 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
300 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
301 Constant *C = ConstantInt::get(Op->getContext(),
302 AndRHS->getValue() & ShrMask);
303 if (C == AndRHS) { // Masking out bits shifted in.
304 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
305 // Make the argument unsigned.
306 Value *ShVal = Op->getOperand(0);
307 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
308 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
317 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
318 /// true, otherwise (V < Lo || V >= Hi). In pratice, we emit the more efficient
319 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
320 /// whether to treat the V, Lo and HI as signed or not. IB is the location to
321 /// insert new instructions.
322 Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
323 bool isSigned, bool Inside) {
324 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
325 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
326 "Lo is not <= Hi in range emission code!");
329 if (Lo == Hi) // Trivially false.
330 return ConstantInt::getFalse(V->getContext());
332 // V >= Min && V < Hi --> V < Hi
333 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
334 ICmpInst::Predicate pred = (isSigned ?
335 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
336 return Builder->CreateICmp(pred, V, Hi);
339 // Emit V-Lo <u Hi-Lo
340 Constant *NegLo = ConstantExpr::getNeg(Lo);
341 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
342 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
343 return Builder->CreateICmpULT(Add, UpperBound);
346 if (Lo == Hi) // Trivially true.
347 return ConstantInt::getTrue(V->getContext());
349 // V < Min || V >= Hi -> V > Hi-1
350 Hi = SubOne(cast<ConstantInt>(Hi));
351 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
352 ICmpInst::Predicate pred = (isSigned ?
353 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
354 return Builder->CreateICmp(pred, V, Hi);
357 // Emit V-Lo >u Hi-1-Lo
358 // Note that Hi has already had one subtracted from it, above.
359 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
360 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
361 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
362 return Builder->CreateICmpUGT(Add, LowerBound);
365 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
366 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
367 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
368 // not, since all 1s are not contiguous.
369 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
370 const APInt& V = Val->getValue();
371 uint32_t BitWidth = Val->getType()->getBitWidth();
372 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
374 // look for the first zero bit after the run of ones
375 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
376 // look for the first non-zero bit
377 ME = V.getActiveBits();
381 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
382 /// where isSub determines whether the operator is a sub. If we can fold one of
383 /// the following xforms:
385 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
386 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
387 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
389 /// return (A +/- B).
391 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
392 ConstantInt *Mask, bool isSub,
394 Instruction *LHSI = dyn_cast<Instruction>(LHS);
395 if (!LHSI || LHSI->getNumOperands() != 2 ||
396 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
398 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
400 switch (LHSI->getOpcode()) {
402 case Instruction::And:
403 if (ConstantExpr::getAnd(N, Mask) == Mask) {
404 // If the AndRHS is a power of two minus one (0+1+), this is simple.
405 if ((Mask->getValue().countLeadingZeros() +
406 Mask->getValue().countPopulation()) ==
407 Mask->getValue().getBitWidth())
410 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
411 // part, we don't need any explicit masks to take them out of A. If that
412 // is all N is, ignore it.
413 uint32_t MB = 0, ME = 0;
414 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
415 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
416 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
417 if (MaskedValueIsZero(RHS, Mask))
422 case Instruction::Or:
423 case Instruction::Xor:
424 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
425 if ((Mask->getValue().countLeadingZeros() +
426 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
427 && ConstantExpr::getAnd(N, Mask)->isNullValue())
433 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
434 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
437 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
438 Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
439 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
441 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
442 if (PredicatesFoldable(LHSCC, RHSCC)) {
443 if (LHS->getOperand(0) == RHS->getOperand(1) &&
444 LHS->getOperand(1) == RHS->getOperand(0))
446 if (LHS->getOperand(0) == RHS->getOperand(0) &&
447 LHS->getOperand(1) == RHS->getOperand(1)) {
448 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
449 unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
450 bool isSigned = LHS->isSigned() || RHS->isSigned();
451 return getICmpValue(isSigned, Code, Op0, Op1, Builder);
455 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
456 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
457 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
458 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
459 if (LHSCst == 0 || RHSCst == 0) return 0;
461 if (LHSCst == RHSCst && LHSCC == RHSCC) {
462 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
463 // where C is a power of 2
464 if (LHSCC == ICmpInst::ICMP_ULT &&
465 LHSCst->getValue().isPowerOf2()) {
466 Value *NewOr = Builder->CreateOr(Val, Val2);
467 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
470 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
471 if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
472 Value *NewOr = Builder->CreateOr(Val, Val2);
473 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
476 // (icmp ne (A & C1), 0) & (icmp ne (A & C2), 0) -->
477 // (icmp eq (A & (C1|C2)), (C1|C2))
478 if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
479 Instruction *I1 = dyn_cast<Instruction>(Val);
480 Instruction *I2 = dyn_cast<Instruction>(Val2);
481 if (I1 && I1->getOpcode() == Instruction::And &&
482 I2 && I2->getOpcode() == Instruction::And &&
483 I1->getOperand(0) == I1->getOperand(0)) {
484 ConstantInt *CI1 = dyn_cast<ConstantInt>(I1->getOperand(1));
485 ConstantInt *CI2 = dyn_cast<ConstantInt>(I2->getOperand(1));
486 if (CI1 && !CI1->isZero() && CI2 && !CI2->isZero() &&
487 CI1->getValue().operator&(CI2->getValue()) == 0) {
488 Constant *ConstOr = ConstantExpr::getOr(CI1, CI2);
489 Value *NewAnd = Builder->CreateAnd(I1->getOperand(0), ConstOr);
490 return Builder->CreateICmp(ICmpInst::ICMP_EQ, NewAnd, ConstOr);
496 // From here on, we only handle:
497 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
498 if (Val != Val2) return 0;
500 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
501 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
502 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
503 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
504 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
507 // We can't fold (ugt x, C) & (sgt x, C2).
508 if (!PredicatesFoldable(LHSCC, RHSCC))
511 // Ensure that the larger constant is on the RHS.
513 if (CmpInst::isSigned(LHSCC) ||
514 (ICmpInst::isEquality(LHSCC) &&
515 CmpInst::isSigned(RHSCC)))
516 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
518 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
522 std::swap(LHSCst, RHSCst);
523 std::swap(LHSCC, RHSCC);
526 // At this point, we know we have two icmp instructions
527 // comparing a value against two constants and and'ing the result
528 // together. Because of the above check, we know that we only have
529 // icmp eq, icmp ne, icmp [su]lt, and icmp [SU]gt here. We also know
530 // (from the icmp folding check above), that the two constants
531 // are not equal and that the larger constant is on the RHS
532 assert(LHSCst != RHSCst && "Compares not folded above?");
535 default: llvm_unreachable("Unknown integer condition code!");
536 case ICmpInst::ICMP_EQ:
538 default: llvm_unreachable("Unknown integer condition code!");
539 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
540 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
541 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
542 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
543 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
544 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
545 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
548 case ICmpInst::ICMP_NE:
550 default: llvm_unreachable("Unknown integer condition code!");
551 case ICmpInst::ICMP_ULT:
552 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
553 return Builder->CreateICmpULT(Val, LHSCst);
554 break; // (X != 13 & X u< 15) -> no change
555 case ICmpInst::ICMP_SLT:
556 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
557 return Builder->CreateICmpSLT(Val, LHSCst);
558 break; // (X != 13 & X s< 15) -> no change
559 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
560 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
561 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
563 case ICmpInst::ICMP_NE:
564 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
565 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
566 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
567 return Builder->CreateICmpUGT(Add, ConstantInt::get(Add->getType(), 1));
569 break; // (X != 13 & X != 15) -> no change
572 case ICmpInst::ICMP_ULT:
574 default: llvm_unreachable("Unknown integer condition code!");
575 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
576 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
577 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
578 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
580 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
581 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
583 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
587 case ICmpInst::ICMP_SLT:
589 default: llvm_unreachable("Unknown integer condition code!");
590 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
591 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
592 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
593 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
595 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
596 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
598 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
602 case ICmpInst::ICMP_UGT:
604 default: llvm_unreachable("Unknown integer condition code!");
605 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
606 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
608 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
610 case ICmpInst::ICMP_NE:
611 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
612 return Builder->CreateICmp(LHSCC, Val, RHSCst);
613 break; // (X u> 13 & X != 15) -> no change
614 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
615 return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
616 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
620 case ICmpInst::ICMP_SGT:
622 default: llvm_unreachable("Unknown integer condition code!");
623 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
624 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
626 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
628 case ICmpInst::ICMP_NE:
629 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
630 return Builder->CreateICmp(LHSCC, Val, RHSCst);
631 break; // (X s> 13 & X != 15) -> no change
632 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
633 return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true);
634 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
643 /// FoldAndOfFCmps - Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of
644 /// instcombine, this returns a Value which should already be inserted into the
646 Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
647 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
648 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
649 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
650 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
651 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
652 // If either of the constants are nans, then the whole thing returns
654 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
655 return ConstantInt::getFalse(LHS->getContext());
656 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
659 // Handle vector zeros. This occurs because the canonical form of
660 // "fcmp ord x,x" is "fcmp ord x, 0".
661 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
662 isa<ConstantAggregateZero>(RHS->getOperand(1)))
663 return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
667 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
668 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
669 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
672 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
673 // Swap RHS operands to match LHS.
674 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
675 std::swap(Op1LHS, Op1RHS);
678 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
679 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
681 return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
682 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
683 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
684 if (Op0CC == FCmpInst::FCMP_TRUE)
686 if (Op1CC == FCmpInst::FCMP_TRUE)
691 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
692 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
695 std::swap(Op0Pred, Op1Pred);
696 std::swap(Op0Ordered, Op1Ordered);
699 // uno && ueq -> uno && (uno || eq) -> ueq
700 // ord && olt -> ord && (ord && lt) -> olt
701 if (Op0Ordered == Op1Ordered)
704 // uno && oeq -> uno && (ord && eq) -> false
705 // uno && ord -> false
707 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
708 // ord && ueq -> ord && (uno || eq) -> oeq
709 return getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS, Builder);
717 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
718 bool Changed = SimplifyCommutative(I);
719 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
721 if (Value *V = SimplifyAndInst(Op0, Op1, TD))
722 return ReplaceInstUsesWith(I, V);
724 // See if we can simplify any instructions used by the instruction whose sole
725 // purpose is to compute bits we don't care about.
726 if (SimplifyDemandedInstructionBits(I))
729 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
730 const APInt &AndRHSMask = AndRHS->getValue();
731 APInt NotAndRHS(~AndRHSMask);
733 // Optimize a variety of ((val OP C1) & C2) combinations...
734 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
735 Value *Op0LHS = Op0I->getOperand(0);
736 Value *Op0RHS = Op0I->getOperand(1);
737 switch (Op0I->getOpcode()) {
739 case Instruction::Xor:
740 case Instruction::Or:
741 // If the mask is only needed on one incoming arm, push it up.
742 if (!Op0I->hasOneUse()) break;
744 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
745 // Not masking anything out for the LHS, move to RHS.
746 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
747 Op0RHS->getName()+".masked");
748 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
750 if (!isa<Constant>(Op0RHS) &&
751 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
752 // Not masking anything out for the RHS, move to LHS.
753 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
754 Op0LHS->getName()+".masked");
755 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
759 case Instruction::Add:
760 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
761 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
762 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
763 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
764 return BinaryOperator::CreateAnd(V, AndRHS);
765 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
766 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
769 case Instruction::Sub:
770 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
771 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
772 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
773 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
774 return BinaryOperator::CreateAnd(V, AndRHS);
776 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
777 // has 1's for all bits that the subtraction with A might affect.
778 if (Op0I->hasOneUse()) {
779 uint32_t BitWidth = AndRHSMask.getBitWidth();
780 uint32_t Zeros = AndRHSMask.countLeadingZeros();
781 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
783 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
784 if (!(A && A->isZero()) && // avoid infinite recursion.
785 MaskedValueIsZero(Op0LHS, Mask)) {
786 Value *NewNeg = Builder->CreateNeg(Op0RHS);
787 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
792 case Instruction::Shl:
793 case Instruction::LShr:
794 // (1 << x) & 1 --> zext(x == 0)
795 // (1 >> x) & 1 --> zext(x == 0)
796 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
798 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
799 return new ZExtInst(NewICmp, I.getType());
804 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
805 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
807 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
808 // If this is an integer truncation or change from signed-to-unsigned, and
809 // if the source is an and/or with immediate, transform it. This
810 // frequently occurs for bitfield accesses.
811 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
812 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
813 CastOp->getNumOperands() == 2)
814 if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
815 if (CastOp->getOpcode() == Instruction::And) {
816 // Change: and (cast (and X, C1) to T), C2
817 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
818 // This will fold the two constants together, which may allow
819 // other simplifications.
820 Value *NewCast = Builder->CreateTruncOrBitCast(
821 CastOp->getOperand(0), I.getType(),
822 CastOp->getName()+".shrunk");
823 // trunc_or_bitcast(C1)&C2
824 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
825 C3 = ConstantExpr::getAnd(C3, AndRHS);
826 return BinaryOperator::CreateAnd(NewCast, C3);
827 } else if (CastOp->getOpcode() == Instruction::Or) {
828 // Change: and (cast (or X, C1) to T), C2
829 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
830 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
831 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
833 return ReplaceInstUsesWith(I, AndRHS);
839 // Try to fold constant and into select arguments.
840 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
841 if (Instruction *R = FoldOpIntoSelect(I, SI))
843 if (isa<PHINode>(Op0))
844 if (Instruction *NV = FoldOpIntoPhi(I))
849 // (~A & ~B) == (~(A | B)) - De Morgan's Law
850 if (Value *Op0NotVal = dyn_castNotVal(Op0))
851 if (Value *Op1NotVal = dyn_castNotVal(Op1))
852 if (Op0->hasOneUse() && Op1->hasOneUse()) {
853 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
854 I.getName()+".demorgan");
855 return BinaryOperator::CreateNot(Or);
859 Value *A = 0, *B = 0, *C = 0, *D = 0;
860 // (A|B) & ~(A&B) -> A^B
861 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
862 match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
863 ((A == C && B == D) || (A == D && B == C)))
864 return BinaryOperator::CreateXor(A, B);
866 // ~(A&B) & (A|B) -> A^B
867 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
868 match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
869 ((A == C && B == D) || (A == D && B == C)))
870 return BinaryOperator::CreateXor(A, B);
872 if (Op0->hasOneUse() &&
873 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
874 if (A == Op1) { // (A^B)&A -> A&(A^B)
875 I.swapOperands(); // Simplify below
877 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
878 cast<BinaryOperator>(Op0)->swapOperands();
879 I.swapOperands(); // Simplify below
884 if (Op1->hasOneUse() &&
885 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
886 if (B == Op0) { // B&(A^B) -> B&(B^A)
887 cast<BinaryOperator>(Op1)->swapOperands();
890 if (A == Op0) // A&(A^B) -> A & ~B
891 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
894 // (A&((~A)|B)) -> A&B
895 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
896 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
897 return BinaryOperator::CreateAnd(A, Op1);
898 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
899 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
900 return BinaryOperator::CreateAnd(A, Op0);
903 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1))
904 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
905 if (Value *Res = FoldAndOfICmps(LHS, RHS))
906 return ReplaceInstUsesWith(I, Res);
908 // If and'ing two fcmp, try combine them into one.
909 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
910 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
911 if (Value *Res = FoldAndOfFCmps(LHS, RHS))
912 return ReplaceInstUsesWith(I, Res);
915 // fold (and (cast A), (cast B)) -> (cast (and A, B))
916 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
917 if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
918 const Type *SrcTy = Op0C->getOperand(0)->getType();
919 if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
920 SrcTy == Op1C->getOperand(0)->getType() &&
921 SrcTy->isIntOrIntVectorTy()) {
922 Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
924 // Only do this if the casts both really cause code to be generated.
925 if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
926 ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
927 Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
928 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
931 // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
932 // cast is otherwise not optimizable. This happens for vector sexts.
933 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
934 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
935 if (Value *Res = FoldAndOfICmps(LHS, RHS))
936 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
938 // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
939 // cast is otherwise not optimizable. This happens for vector sexts.
940 if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
941 if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
942 if (Value *Res = FoldAndOfFCmps(LHS, RHS))
943 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
947 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
948 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
949 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
950 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
951 SI0->getOperand(1) == SI1->getOperand(1) &&
952 (SI0->hasOneUse() || SI1->hasOneUse())) {
954 Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
956 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
961 return Changed ? &I : 0;
964 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
965 /// capable of providing pieces of a bswap. The subexpression provides pieces
966 /// of a bswap if it is proven that each of the non-zero bytes in the output of
967 /// the expression came from the corresponding "byte swapped" byte in some other
968 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
969 /// we know that the expression deposits the low byte of %X into the high byte
970 /// of the bswap result and that all other bytes are zero. This expression is
971 /// accepted, the high byte of ByteValues is set to X to indicate a correct
974 /// This function returns true if the match was unsuccessful and false if the
/// match succeeded.
975 /// On entry to the function the "OverallLeftShift" is a signed integer value
976 /// indicating the number of bytes that the subexpression is later shifted. For
977 /// example, if the expression is later right shifted by 16 bits, the
978 /// OverallLeftShift value would be -2 on entry. This is used to specify which
979 /// byte of ByteValues is actually being set.
981 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
982 /// byte is masked to zero by a user. For example, in (X & 255), X will be
983 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
984 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
985 /// always in the local (OverallLeftShift) coordinate space.
987 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
988 SmallVector<Value*, 8> &ByteValues) {
989 if (Instruction *I = dyn_cast<Instruction>(V)) {
990 // If this is an or instruction, it may be an inner node of the bswap.
991 if (I->getOpcode() == Instruction::Or) {
// Recurse into both operands with the same shift/mask; each side must
// independently supply compatible bswap pieces for the match to hold.
992 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
994 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
998 // If this is a logical shift by a constant multiple of 8, recurse with
999 // OverallLeftShift and ByteMask adjusted.
1000 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
1002 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
1003 // Ensure the shift amount is defined and of a byte value.
// (ShAmt & 7) rejects shifts that are not byte-aligned; the size check
// rejects shifts wider than the value being analyzed.
1004 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
1007 unsigned ByteShift = ShAmt >> 3;
1008 if (I->getOpcode() == Instruction::Shl) {
1009 // X << 2 -> collect(X, +2)
1010 OverallLeftShift += ByteShift;
1011 ByteMask >>= ByteShift;
1013 // X >>u 2 -> collect(X, -2)
1014 OverallLeftShift -= ByteShift;
1015 ByteMask <<= ByteShift;
// Keep only mask bits that correspond to real bytes of the value.
1016 ByteMask &= (~0U >> (32-ByteValues.size()));
// If the accumulated shift moves every byte out of range, the match fails
// (returning true signals failure here).
1019 if (OverallLeftShift >= (int)ByteValues.size()) return true;
1020 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
1022 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1026 // If this is a logical 'and' with a mask that clears bytes, clear the
1027 // corresponding bytes in ByteMask.
1028 if (I->getOpcode() == Instruction::And &&
1029 isa<ConstantInt>(I->getOperand(1))) {
1030 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
1031 unsigned NumBytes = ByteValues.size();
// Byte starts as 0xFF in the low byte and is shifted up one byte per
// iteration, selecting successive bytes of AndMask.
1032 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
1033 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
1035 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
1036 // If this byte is masked out by a later operation, we don't care what
1038 if ((ByteMask & (1 << i)) == 0)
1041 // If the AndMask is all zeros for this byte, clear the bit.
1042 APInt MaskB = AndMask & Byte;
1044 ByteMask &= ~(1U << i);
1048 // If the AndMask is not all ones for this byte, it's not a bytezap.
1052 // Otherwise, this byte is kept.
1055 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1060 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
1061 // the input value to the bswap. Some observations: 1) if more than one byte
1062 // is demanded from this input, then it could not be successfully assembled
1063 // into a byteswap. At least one of the two bytes would not be aligned with
1064 // their ultimate destination.
1065 if (!isPowerOf2_32(ByteMask)) return true;
1066 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
1068 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
1069 // is demanded, it needs to go into byte 0 of the result. This means that the
1070 // byte needs to be shifted until it lands in the right byte bucket. The
1071 // shift amount depends on the position: if the byte is coming from the high
1072 // part of the value (e.g. byte 3) then it must be shifted right. If from the
1073 // low part, it must be shifted left.
1074 unsigned DestByteNo = InputByteNo + OverallLeftShift;
1075 if (InputByteNo < ByteValues.size()/2) {
// For a bswap, the byte must land exactly at its mirrored position.
1076 if (ByteValues.size()-1-DestByteNo != InputByteNo)
1079 if (ByteValues.size()-1-DestByteNo != InputByteNo)
1083 // If the destination byte value is already defined, the values are or'd
1084 // together, which isn't a bswap (unless it's an or of the same bits).
1085 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
// Record which source value supplies this byte of the bswap result.
1087 ByteValues[DestByteNo] = V;
1091 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
1092 /// If so, insert the new bswap intrinsic and return it.
1093 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
// Only scalar integer types with an even number of bytes, and at most
// 32 bytes (the width of the uint32_t ByteMask), can be matched.
1094 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
1095 if (!ITy || ITy->getBitWidth() % 16 ||
1096 // ByteMask only allows up to 32-byte values.
1097 ITy->getBitWidth() > 32*8)
1098 return 0; // Can only bswap pairs of bytes. Can't do vectors.
1100 /// ByteValues - For each byte of the result, we keep track of which value
1101 /// defines each byte.
1102 SmallVector<Value*, 8> ByteValues;
1103 ByteValues.resize(ITy->getBitWidth()/8);
1105 // Try to find all the pieces corresponding to the bswap.
1106 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
1107 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
1110 // Check to see if all of the bytes come from the same value.
1111 Value *V = ByteValues[0];
1112 if (V == 0) return 0; // Didn't find a byte? Must be zero.
1114 // Check to make sure that all of the bytes come from the same value.
1115 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
1116 if (ByteValues[i] != V)
// Every byte of the result comes from V byte-swapped: replace the whole
// expression with a call to the llvm.bswap intrinsic on V.
1118 const Type *Tys[] = { ITy };
1119 Module *M = I.getParent()->getParent()->getParent();
1120 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
1121 return CallInst::Create(F, V);
1124 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
1125 /// If A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then
1126 /// we can simplify this expression to "cond ? C : D or B".
1127 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
1128 Value *C, Value *D) {
1129 // If A is not a select of -1/0, this cannot match.
// A must be sext(Cond) with Cond of type i1, i.e. the value cond ? -1 : 0.
1131 if (!match(A, m_SExt(m_Value(Cond))) ||
1132 !Cond->getType()->isIntegerTy(1))
1135 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
// Both ~sext(cond) and sext(~cond) denote cond ? 0 : -1, so match either.
1136 if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
1137 return SelectInst::Create(Cond, C, B);
1138 if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
1139 return SelectInst::Create(Cond, C, B);
1141 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
1142 if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
1143 return SelectInst::Create(Cond, C, D);
1144 if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
1145 return SelectInst::Create(Cond, C, D);
1149 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
1150 Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
1151 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
1153 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
1154 if (PredicatesFoldable(LHSCC, RHSCC)) {
// Canonicalize operand order so both compares see the same (Op0, Op1).
1155 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1156 LHS->getOperand(1) == RHS->getOperand(0))
1157 LHS->swapOperands();
1158 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1159 LHS->getOperand(1) == RHS->getOperand(1)) {
1160 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
// OR the 3-bit predicate encodings, then rebuild a single compare.
1161 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
1162 bool isSigned = LHS->isSigned() || RHS->isSigned();
1163 return getICmpValue(isSigned, Code, Op0, Op1, Builder);
1167 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
1168 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
1169 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
1170 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
1171 if (LHSCst == 0 || RHSCst == 0) return 0;
1173 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
1174 if (LHSCst == RHSCst && LHSCC == RHSCC &&
1175 LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
1176 Value *NewOr = Builder->CreateOr(Val, Val2);
1177 return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
1180 // From here on, we only handle:
1181 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
1182 if (Val != Val2) return 0;
1184 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
1185 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
1186 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
1187 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
1188 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
1191 // We can't fold (ugt x, C) | (sgt x, C2).
1192 if (!PredicatesFoldable(LHSCC, RHSCC))
1195 // Ensure that the larger constant is on the RHS.
// Compare the constants with signed order when either predicate is signed,
// otherwise with unsigned order.
1197 if (CmpInst::isSigned(LHSCC) ||
1198 (ICmpInst::isEquality(LHSCC) &&
1199 CmpInst::isSigned(RHSCC)))
1200 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
1202 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
// Swap the whole (compare, constant, predicate) triples together.
1205 std::swap(LHS, RHS);
1206 std::swap(LHSCst, RHSCst);
1207 std::swap(LHSCC, RHSCC);
1210 // At this point, we know we have two icmp instructions
1211 // comparing a value against two constants and or'ing the result
1212 // together. Because of the above check, we know that we only have
1213 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
1214 // icmp folding check above), that the two constants are not
1216 assert(LHSCst != RHSCst && "Compares not folded above?");
// Outer switch: LHS predicate. Inner switch: RHS predicate.
1219 default: llvm_unreachable("Unknown integer condition code!");
1220 case ICmpInst::ICMP_EQ:
1222 default: llvm_unreachable("Unknown integer condition code!");
1223 case ICmpInst::ICMP_EQ:
// Two equalities against adjacent constants fold into one unsigned
// range test: (X-13) <u 2 covers both 13 and 14.
1224 if (LHSCst == SubOne(RHSCst)) {
1225 // (X == 13 | X == 14) -> X-13 <u 2
1226 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
1227 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
1228 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
1229 return Builder->CreateICmpULT(Add, AddCST);
1231 break; // (X == 13 | X == 15) -> no change
1232 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
1233 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
1235 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
1236 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
1237 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
1241 case ICmpInst::ICMP_NE:
1243 default: llvm_unreachable("Unknown integer condition code!");
1244 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
1245 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
1246 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
1248 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
1249 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
1250 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
1251 return ConstantInt::getTrue(LHS->getContext());
1254 case ICmpInst::ICMP_ULT:
1256 default: llvm_unreachable("Unknown integer condition code!");
1257 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
1259 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
1260 // If RHSCst is [us]MAXINT, it is always false. Not handling
1261 // this can cause overflow.
1262 if (RHSCst->isMaxValue(false))
1264 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false);
1265 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
1267 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
1268 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
1270 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
1274 case ICmpInst::ICMP_SLT:
1276 default: llvm_unreachable("Unknown integer condition code!");
1277 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
1279 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
1280 // If RHSCst is [us]MAXINT, it is always false. Not handling
1281 // this can cause overflow.
1282 if (RHSCst->isMaxValue(true))
1284 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false);
1285 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
1287 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
1288 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
1290 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
1294 case ICmpInst::ICMP_UGT:
1296 default: llvm_unreachable("Unknown integer condition code!");
1297 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
1298 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
1300 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
1302 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
1303 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
1304 return ConstantInt::getTrue(LHS->getContext());
1305 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
1309 case ICmpInst::ICMP_SGT:
1311 default: llvm_unreachable("Unknown integer condition code!");
1312 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
1313 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
1315 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
1317 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
1318 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
1319 return ConstantInt::getTrue(LHS->getContext());
1320 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
1328 /// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of
1329 /// instcombine, this returns a Value which should already be inserted into the
1331 Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
// (fcmp uno x, C1) | (fcmp uno y, C2): an 'uno' compare against a non-NaN
// constant is just a NaN check of the variable operand, so the two checks
// can be merged into one compare of x against y.
1332 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
1333 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
1334 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
1335 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
1336 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
1337 // If either of the constants are nans, then the whole thing returns
1339 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
1340 return ConstantInt::getTrue(LHS->getContext());
1342 // Otherwise, no need to compare the two constants, compare the
1344 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1347 // Handle vector zeros. This occurs because the canonical form of
1348 // "fcmp uno x,x" is "fcmp uno x, 0".
1349 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
1350 isa<ConstantAggregateZero>(RHS->getOperand(1)))
1351 return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
1356 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
1357 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
1358 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
1360 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
1361 // Swap RHS operands to match LHS.
1362 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
1363 std::swap(Op1LHS, Op1RHS);
1365 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
1366 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
1368 return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
// TRUE absorbs everything; FALSE is the identity for 'or'.
1369 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
1370 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
1371 if (Op0CC == FCmpInst::FCMP_FALSE)
1373 if (Op1CC == FCmpInst::FCMP_FALSE)
// Encode each predicate as an (ordered flag, 3-bit code) pair.
1377 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
1378 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
1379 if (Op0Ordered == Op1Ordered) {
1380 // If both are ordered or unordered, return a new fcmp with
1381 // or'ed predicates.
1382 return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
1388 /// FoldOrWithConstants - This helper function folds:
1390 /// ((A | B) & C1) | (B & C2)
1396 /// when the XOR of the two constants is "all ones" (-1).
1397 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
1398 Value *A, Value *B, Value *C) {
// C must be a constant mask (C1), and Op must match (V1 & C2).
1399 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
1403 ConstantInt *CI2 = 0;
1404 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
// The two masks must be exact bitwise complements: C1 ^ C2 == -1.
1406 APInt Xor = CI1->getValue() ^ CI2->getValue();
1407 if (!Xor.isAllOnesValue()) return 0;
1409 if (V1 == A || V1 == B) {
// ((A|B) & C1) | (B & C2) --> (other-op & C1) | B, where 'other-op' is
// whichever of A/B is not V1.
1410 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
1411 return BinaryOperator::CreateOr(NewOp, V1);
// visitOr - Fold 'or' instructions: constant folds, bswap recognition,
// select recognition, De Morgan, xor recognition, and cast hoisting.
// Returns a replacement instruction, or &I/0 per the InstCombine protocol.
1417 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
1418 bool Changed = SimplifyCommutative(I);
1419 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1421 if (Value *V = SimplifyOrInst(Op0, Op1, TD))
1422 return ReplaceInstUsesWith(I, V);
1424 // See if we can simplify any instructions used by the instruction whose sole
1425 // purpose is to compute bits we don't care about.
1426 if (SimplifyDemandedInstructionBits(I))
1429 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
1430 ConstantInt *C1 = 0; Value *X = 0;
1431 // (X & C1) | C2 --> (X | C2) & (C1|C2)
1432 // iff (C1 & C2) == 0.
1433 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
1434 (RHS->getValue() & C1->getValue()) != 0 &&
1436 Value *Or = Builder->CreateOr(X, RHS);
1438 return BinaryOperator::CreateAnd(Or,
1439 ConstantInt::get(I.getContext(),
1440 RHS->getValue() | C1->getValue()));
1443 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
1444 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
1446 Value *Or = Builder->CreateOr(X, RHS);
1448 return BinaryOperator::CreateXor(Or,
1449 ConstantInt::get(I.getContext(),
1450 C1->getValue() & ~RHS->getValue()));
1453 // Try to fold constant and into select arguments.
1454 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1455 if (Instruction *R = FoldOpIntoSelect(I, SI))
1458 if (isa<PHINode>(Op0))
1459 if (Instruction *NV = FoldOpIntoPhi(I))
1463 Value *A = 0, *B = 0;
1464 ConstantInt *C1 = 0, *C2 = 0;
1466 // (A | B) | C and A | (B | C) -> bswap if possible.
1467 // (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible.
1468 if (match(Op0, m_Or(m_Value(), m_Value())) ||
1469 match(Op1, m_Or(m_Value(), m_Value())) ||
1470 (match(Op0, m_Shift(m_Value(), m_Value())) &&
1471 match(Op1, m_Shift(m_Value(), m_Value())))) {
1472 if (Instruction *BSwap = MatchBSwap(I))
1476 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
1477 if (Op0->hasOneUse() &&
1478 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1479 MaskedValueIsZero(Op1, C1->getValue())) {
1480 Value *NOr = Builder->CreateOr(A, Op1);
1482 return BinaryOperator::CreateXor(NOr, C1);
1485 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
1486 if (Op1->hasOneUse() &&
1487 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1488 MaskedValueIsZero(Op0, C1->getValue())) {
1489 Value *NOr = Builder->CreateOr(A, Op0);
1491 return BinaryOperator::CreateXor(NOr, C1);
// Main (A & C) | (B & D) pattern block: try bitfield-merge folds first
// when C/D are constants, then select/xor recognition below.
1495 Value *C = 0, *D = 0;
1496 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
1497 match(Op1, m_And(m_Value(B), m_Value(D)))) {
1498 Value *V1 = 0, *V2 = 0, *V3 = 0;
1499 C1 = dyn_cast<ConstantInt>(C);
1500 C2 = dyn_cast<ConstantInt>(D);
1501 if (C1 && C2) { // (A & C1)|(B & C2)
1502 // If we have: ((V + N) & C1) | (V & C2)
1503 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1504 // replace with V+N.
1505 if (C1->getValue() == ~C2->getValue()) {
1506 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
1507 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
1508 // Add commutes, try both ways.
1509 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
1510 return ReplaceInstUsesWith(I, A)
1511 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
1512 return ReplaceInstUsesWith(I, A);
1514 // Or commutes, try both ways.
1515 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
1516 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
1517 // Add commutes, try both ways.
1518 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
1519 return ReplaceInstUsesWith(I, B);
1520 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
1521 return ReplaceInstUsesWith(I, B);
1525 if ((C1->getValue() & C2->getValue()) == 0) {
1526 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
1527 // iff (C1&C2) == 0 and (N&~C1) == 0
1528 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
1529 ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
1530 (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
1531 return BinaryOperator::CreateAnd(A,
1532 ConstantInt::get(A->getContext(),
1533 C1->getValue()|C2->getValue()));
1534 // Or commutes, try both ways.
1535 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
1536 ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N)
1537 (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V)
1538 return BinaryOperator::CreateAnd(B,
1539 ConstantInt::get(B->getContext(),
1540 C1->getValue()|C2->getValue()));
1542 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
1543 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
1544 ConstantInt *C3 = 0, *C4 = 0;
1545 if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
1546 (C3->getValue() & ~C1->getValue()) == 0 &&
1547 match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
1548 (C4->getValue() & ~C2->getValue()) == 0) {
1549 V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
1550 return BinaryOperator::CreateAnd(V2,
1551 ConstantInt::get(B->getContext(),
1552 C1->getValue()|C2->getValue()));
1557 // Check to see if we have any common things being and'ed. If so, find the
1558 // terms for V1 & (V2|V3).
1559 if (Op0->hasOneUse() || Op1->hasOneUse()) {
1561 if (A == B) // (A & C)|(A & D) == A & (C|D)
1562 V1 = A, V2 = C, V3 = D;
1563 else if (A == D) // (A & C)|(B & A) == A & (B|C)
1564 V1 = A, V2 = B, V3 = C;
1565 else if (C == B) // (A & C)|(C & D) == C & (A|D)
1566 V1 = C, V2 = A, V3 = D;
1567 else if (C == D) // (A & C)|(B & C) == C & (A|B)
1568 V1 = C, V2 = A, V3 = B;
1571 Value *Or = Builder->CreateOr(V2, V3, "tmp");
1572 return BinaryOperator::CreateAnd(V1, Or);
1576 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
1577 // Don't do this for vector select idioms, the code generator doesn't handle
1579 if (!I.getType()->isVectorTy()) {
// Try all four argument permutations of the select pattern.
1580 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
1582 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
1584 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
1586 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
1590 // ((A&~B)|(~A&B)) -> A^B
1591 if ((match(C, m_Not(m_Specific(D))) &&
1592 match(B, m_Not(m_Specific(A)))))
1593 return BinaryOperator::CreateXor(A, D);
1594 // ((~B&A)|(~A&B)) -> A^B
1595 if ((match(A, m_Not(m_Specific(D))) &&
1596 match(B, m_Not(m_Specific(C)))))
1597 return BinaryOperator::CreateXor(C, D);
1598 // ((A&~B)|(B&~A)) -> A^B
1599 if ((match(C, m_Not(m_Specific(B))) &&
1600 match(D, m_Not(m_Specific(A)))))
1601 return BinaryOperator::CreateXor(A, B);
1602 // ((~B&A)|(B&~A)) -> A^B
1603 if ((match(A, m_Not(m_Specific(B))) &&
1604 match(D, m_Not(m_Specific(C)))))
1605 return BinaryOperator::CreateXor(C, B);
1607 // ((A|B)&1)|(B&-2) -> (A&1) | B
1608 if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
1609 match(A, m_Or(m_Specific(B), m_Value(V1)))) {
1610 Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C);
1611 if (Ret) return Ret;
1613 // (B&-2)|((A|B)&1) -> (A&1) | B
1614 if (match(B, m_Or(m_Specific(A), m_Value(V1))) ||
1615 match(B, m_Or(m_Value(V1), m_Specific(A)))) {
1616 Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D);
1617 if (Ret) return Ret;
1621 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
1622 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
1623 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
1624 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
1625 SI0->getOperand(1) == SI1->getOperand(1) &&
1626 (SI0->hasOneUse() || SI1->hasOneUse())) {
1627 Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
1629 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
1630 SI1->getOperand(1));
1634 // (~A | ~B) == (~(A & B)) - De Morgan's Law
1635 if (Value *Op0NotVal = dyn_castNotVal(Op0))
1636 if (Value *Op1NotVal = dyn_castNotVal(Op1))
1637 if (Op0->hasOneUse() && Op1->hasOneUse()) {
1638 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
1639 I.getName()+".demorgan");
1640 return BinaryOperator::CreateNot(And);
1643 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
1644 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
1645 if (Value *Res = FoldOrOfICmps(LHS, RHS))
1646 return ReplaceInstUsesWith(I, Res);
1648 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
1649 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1650 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1651 if (Value *Res = FoldOrOfFCmps(LHS, RHS))
1652 return ReplaceInstUsesWith(I, Res);
1654 // fold (or (cast A), (cast B)) -> (cast (or A, B))
1655 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
1656 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
1657 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
1658 const Type *SrcTy = Op0C->getOperand(0)->getType();
1659 if (SrcTy == Op1C->getOperand(0)->getType() &&
1660 SrcTy->isIntOrIntVectorTy()) {
1661 Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
1663 if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
1664 // Only do this if the casts both really cause code to be
1666 ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
1667 ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
1668 Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
1669 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
1672 // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
1673 // cast is otherwise not optimizable. This happens for vector sexts.
1674 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
1675 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
1676 if (Value *Res = FoldOrOfICmps(LHS, RHS))
1677 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1679 // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
1680 // cast is otherwise not optimizable. This happens for vector sexts.
1681 if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
1682 if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
1683 if (Value *Res = FoldOrOfFCmps(LHS, RHS))
1684 return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
1689 return Changed ? &I : 0;
1692 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
1693 bool Changed = SimplifyCommutative(I);
1694 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1696 if (isa<UndefValue>(Op1)) {
1697 if (isa<UndefValue>(Op0))
1698 // Handle undef ^ undef -> 0 special case. This is a common
1700 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1701 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
1706 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1708 // See if we can simplify any instructions used by the instruction whose sole
1709 // purpose is to compute bits we don't care about.
1710 if (SimplifyDemandedInstructionBits(I))
1712 if (I.getType()->isVectorTy())
1713 if (isa<ConstantAggregateZero>(Op1))
1714 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
1716 // Is this a ~ operation?
1717 if (Value *NotOp = dyn_castNotVal(&I)) {
1718 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
1719 if (Op0I->getOpcode() == Instruction::And ||
1720 Op0I->getOpcode() == Instruction::Or) {
1721 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
1722 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
1723 if (dyn_castNotVal(Op0I->getOperand(1)))
1724 Op0I->swapOperands();
1725 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
1727 Builder->CreateNot(Op0I->getOperand(1),
1728 Op0I->getOperand(1)->getName()+".not");
1729 if (Op0I->getOpcode() == Instruction::And)
1730 return BinaryOperator::CreateOr(Op0NotVal, NotY);
1731 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
1734 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
1735 // ~(X | Y) === (~X & ~Y) - De Morgan's Law
1736 if (isFreeToInvert(Op0I->getOperand(0)) &&
1737 isFreeToInvert(Op0I->getOperand(1))) {
1739 Builder->CreateNot(Op0I->getOperand(0), "notlhs");
1741 Builder->CreateNot(Op0I->getOperand(1), "notrhs");
1742 if (Op0I->getOpcode() == Instruction::And)
1743 return BinaryOperator::CreateOr(NotX, NotY);
1744 return BinaryOperator::CreateAnd(NotX, NotY);
1747 } else if (Op0I->getOpcode() == Instruction::AShr) {
1748 // ~(~X >>s Y) --> (X >>s Y)
1749 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0)))
1750 return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1));
1756 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
1757 if (RHS->isOne() && Op0->hasOneUse())
1758 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
1759 if (CmpInst *CI = dyn_cast<CmpInst>(Op0))
1760 return CmpInst::Create(CI->getOpcode(),
1761 CI->getInversePredicate(),
1762 CI->getOperand(0), CI->getOperand(1));
1764 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
1765 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
1766 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
1767 if (CI->hasOneUse() && Op0C->hasOneUse()) {
1768 Instruction::CastOps Opcode = Op0C->getOpcode();
1769 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
1770 (RHS == ConstantExpr::getCast(Opcode,
1771 ConstantInt::getTrue(I.getContext()),
1772 Op0C->getDestTy()))) {
1773 CI->setPredicate(CI->getInversePredicate());
1774 return CastInst::Create(Opcode, CI, Op0C->getType());
1780 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1781 // ~(c-X) == X-c-1 == X+(-c-1)
1782 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
1783 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
1784 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
1785 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
1786 ConstantInt::get(I.getType(), 1));
1787 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
1790 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
1791 if (Op0I->getOpcode() == Instruction::Add) {
1792 // ~(X-c) --> (-c-1)-X
1793 if (RHS->isAllOnesValue()) {
1794 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
1795 return BinaryOperator::CreateSub(
1796 ConstantExpr::getSub(NegOp0CI,
1797 ConstantInt::get(I.getType(), 1)),
1798 Op0I->getOperand(0));
1799 } else if (RHS->getValue().isSignBit()) {
1800 // (X + C) ^ signbit -> (X + C + signbit)
1801 Constant *C = ConstantInt::get(I.getContext(),
1802 RHS->getValue() + Op0CI->getValue());
1803 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
1806 } else if (Op0I->getOpcode() == Instruction::Or) {
1807 // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
1808 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
1809 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
1810 // Anything in both C1 and C2 is known to be zero, remove it from
1812 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
1813 NewRHS = ConstantExpr::getAnd(NewRHS,
1814 ConstantExpr::getNot(CommonBits));
1816 I.setOperand(0, Op0I->getOperand(0));
1817 I.setOperand(1, NewRHS);
1824 // Try to fold constant and into select arguments.
1825 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1826 if (Instruction *R = FoldOpIntoSelect(I, SI))
1828 if (isa<PHINode>(Op0))
1829 if (Instruction *NV = FoldOpIntoPhi(I))
1833 if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
1835 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
1837 if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
1839 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
1842 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
1845 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
1846 if (A == Op0) { // B^(B|A) == (A|B)^B
1847 Op1I->swapOperands();
1849 std::swap(Op0, Op1);
1850 } else if (B == Op0) { // B^(A|B) == (A|B)^B
1851 I.swapOperands(); // Simplified below.
1852 std::swap(Op0, Op1);
1854 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
1855 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
1856 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
1857 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
1858 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
1860 if (A == Op0) { // A^(A&B) -> A^(B&A)
1861 Op1I->swapOperands();
1864 if (B == Op0) { // A^(B&A) -> (B&A)^A
1865 I.swapOperands(); // Simplified below.
1866 std::swap(Op0, Op1);
1871 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
1874 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
1875 Op0I->hasOneUse()) {
1876 if (A == Op1) // (B|A)^B == (A|B)^B
1878 if (B == Op1) // (A|B)^B == A & ~B
1879 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
1880 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
1881 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
1882 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
1883 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
1884 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
1886 if (A == Op1) // (A&B)^A -> (B&A)^A
1888 if (B == Op1 && // (B&A)^A == ~B & A
1889 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
1890 return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
1895 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
1896 if (Op0I && Op1I && Op0I->isShift() &&
1897 Op0I->getOpcode() == Op1I->getOpcode() &&
1898 Op0I->getOperand(1) == Op1I->getOperand(1) &&
1899 (Op1I->hasOneUse() || Op1I->hasOneUse())) {
1901 Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
1903 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
1904 Op1I->getOperand(1));
1908 Value *A, *B, *C, *D;
1909 // (A & B)^(A | B) -> A ^ B
1910 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
1911 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
1912 if ((A == C && B == D) || (A == D && B == C))
1913 return BinaryOperator::CreateXor(A, B);
1915 // (A | B)^(A & B) -> A ^ B
1916 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
1917 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
1918 if ((A == C && B == D) || (A == D && B == C))
1919 return BinaryOperator::CreateXor(A, B);
1923 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
1924 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
1925 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
1926 // (X & Y)^(X & Y) -> (Y^Z) & X
1927 Value *X = 0, *Y = 0, *Z = 0;
1929 X = A, Y = B, Z = D;
1931 X = A, Y = B, Z = C;
1933 X = B, Y = A, Z = D;
1935 X = B, Y = A, Z = C;
1938 Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
1939 return BinaryOperator::CreateAnd(NewOp, X);
1944 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
1945 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
1946 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
1947 if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
1948 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1949 LHS->getOperand(1) == RHS->getOperand(0))
1950 LHS->swapOperands();
1951 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1952 LHS->getOperand(1) == RHS->getOperand(1)) {
1953 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1954 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
1955 bool isSigned = LHS->isSigned() || RHS->isSigned();
1956 return ReplaceInstUsesWith(I,
1957 getICmpValue(isSigned, Code, Op0, Op1, Builder));
1961 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
1962 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
1963 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
1964 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
1965 const Type *SrcTy = Op0C->getOperand(0)->getType();
1966 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
1967 // Only do this if the casts both really cause code to be generated.
1968 ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
1970 ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0),
1972 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
1973 Op1C->getOperand(0), I.getName());
1974 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
1979 return Changed ? &I : 0;