//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      const Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());
      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
                                                   New->getName()+".sub", It);
      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, V);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }
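  // Illustrative IR for the canonicalization above (a hand-written sketch,
  // not taken from the LLVM test suite):
  //   %p = alloca i32, i32 4
  // becomes
  //   %0 = alloca [4 x i32]
  //   %p = getelementptr inbounds [4 x i32]* %0, i32 0, i32 0
  // so later passes see a single fixed-size object rather than an array
  // allocation with an element count.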
  if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
    // If alloca'ing a zero byte object, replace the alloca with a null pointer.
    // Note that we only do this for alloca's, because malloc should allocate
    // and return a unique pointer, even for a zero byte allocation.
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
  }

  return 0;
}
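// For instance (sketch): "%e = alloca {}" allocates zero bytes, so the code
// above folds %e to "{}* null"; no well-defined user can observe a distinct
// address for a zero-size alloca, which is what makes the fold safe.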
/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  const PointerType *DestTy = cast<PointerType>(CI->getType());
  const Type *DestPTy = DestTy->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }
      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
              IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
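// A sketch of the fold (illustrative IR, not from the source tree): given
//   %c = bitcast i32* %p to float*
//   %v = load float* %c
// the cast is moved to the loaded value, keeping the memory access on the
// original pointer:
//   %v.i = load i32* %p
//   %v   = bitcast i32 %v.i to float
// Both types are 32 bits wide and neither is a pointer, so the size and
// pointerness checks above are satisfied.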
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
    if (KnownAlign >
        (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
                                  LI.getAlignment()))
      LI.setAlignment(KnownAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;
  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);
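  // Sketch of what this catches (illustrative IR):
  //   store i32 %x, i32* %p
  //   %q = add i32 %y, 1          ; a few intervening arithmetic ops are fine
  //   %v = load i32* %p           ; replaced by %x
  // FindAvailableLoadedValue scans backwards, at most six instructions here,
  // for an earlier store to (or load of) the same address.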
  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }
  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }
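      // Illustrative sketch, assuming both arms are known dereferenceable:
      //   %ptr = select i1 %c, i32* %a, i32* %b
      //   %v = load i32* %ptr
      // becomes
      //   %a.val = load i32* %a
      //   %b.val = load i32* %b
      //   %v = select i1 %c, i32 %a.val, i32 %b.val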
      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }
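      // E.g. (sketch): a load of "select i1 %c, i32* null, i32* %g" can only
      // execute when %c is false, so it is safe to load %g directly; the
      // symmetric fold below handles null in the other arm.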
      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  const Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  SmallVector<Value*, 4> NewGEPIndices;
  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }
  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  const Type *CastSrcTy = SIOp0->getType();
  const Type *CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
                                           NewGEPIndices.end());

  Value *NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                          SIOp0->getName()+".c");
  return new StoreInst(NewCast, CastOp);
}
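// Illustrative sketch, assuming 32-bit pointers: for
//   %c = bitcast { i32*, float }* %P to i32*
//   store i32 %x, i32* %c
// a noop GEP indexes to the first field and the *value* is cast instead of
// the pointer:
//   %f = getelementptr inbounds { i32*, float }* %P, i32 0, i32 0
//   %x.c = inttoptr i32 %x to i32*
//   store i32* %x.c, i32** %f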
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;
  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
// If this instruction has two uses, one of which is a llvm.dbg.declare,
// return the llvm.dbg.declare.
DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
  if (!V->hasNUses(2))
    return 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
      return DI;
    if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
      if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
        return DI;
    }
  }
  return 0;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  // If the RHS is an alloca with two uses, the other one being a
  // llvm.dbg.declare, zapify the store and the declare, making the
  // alloca dead.  We must do this to prevent declares from affecting
  // codegen.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr))
        return EraseInstFromFunction(SI);
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse())
            return EraseInstFromFunction(SI);
          if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
            EraseInstFromFunction(*DI);
            return EraseInstFromFunction(SI);
          }
        }
      }
    }
    if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
      EraseInstFromFunction(*DI);
      return EraseInstFromFunction(SI);
    }
  }
  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
    if (KnownAlign >
        (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
                                  SI.getAlignment()))
      SI.setAlignment(KnownAlign);
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }
    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're storing to and produces the value we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }
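  // Two sketches of what this scan catches (illustrative IR):
  //   store i32 1, i32* %p        ; dead: erased by the PrevSI case above
  //   store i32 2, i32* %p
  // and the load/store round trip, where the *current* store is the dead one:
  //   %x = load i32* %p
  //   store i32 %x, i32* %p       ; erased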
  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);
  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();
  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *OtherBB = 0;
  if (*PI != StoreBB)
    OtherBB = *PI;
  ++PI;
  if (PI == pred_end(DestBB))
    return false;

  if (*PI != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = *PI;
  }
  if (++PI != pred_end(DestBB))
    return false;
  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;
  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or if the
    // alignments differ, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        OtherStore->getAlignment() != SI.getAlignment())
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            OtherStore->getAlignment() != SI.getAlignment())
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }
    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }
  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
    PN->reserveOperandSpace(2);
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }
  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstNonPHI();
  InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
                                    OtherStore->isVolatile(),
                                    SI.getAlignment()), *BBI);

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}
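// Illustrative sketch of the diamond case handled above: from
//   then:
//     store i32 1, i32* %p
//     br label %join
//   else:
//     store i32 2, i32* %p
//     br label %join
// both stores sink into the successor:
//   join:
//     %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
//     store i32 %storemerge, i32* %p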