//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumDeadStore, "Number of dead stores eliminated");

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
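  //
  // For example (a rough, illustrative sketch; names are not verbatim):
  //   %p = alloca i32, i32 4
  // becomes:
  //   %p1 = alloca [4 x i32]
  //   %p1.sub = getelementptr inbounds [4 x i32]* %p1, i32 0, i32 0
  // and all uses of %p are rewritten to use %p1.sub.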
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible... also skip interleaved debug info.
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName()+".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (TD && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero-byte allocation.
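    //
    // A minimal sketch of the effect (illustrative IR only):
    //   %a = alloca {}
    //   %b = alloca [0 x i32]
    // becomes a single entry-block alloca, with the second rewritten
    // through a bitcast:
    //   %a = alloca {}
    //   %b = bitcast {}* %a to [0 x i32]*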
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero-sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // anywhere else.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign =
          std::max(TD->getPrefTypeAlignment(EntryAI->getAllocatedType()),
                   TD->getPrefTypeAlignment(AI.getAllocatedType()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // dead allocations.
  return visitAllocSite(AI);
}

/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
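///
/// For example (an illustrative sketch, not taken from a test case):
///   %c = bitcast <2 x i16>* %P to i32*
///   %v = load i32* %c
/// becomes:
///   %v.w = load <2 x i16>* %P
///   %v   = bitcast <2 x i16> %v.w to i32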
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  PointerType *DestTy = cast<PointerType>(CI->getType());
  Type *DestPTy = DestTy->getElementType();
  if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
          IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),
                                 TD);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of them are okay for atomic loads; this needs refactoring.
  if (!LI.isSimple()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
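  //
  // For instance (an illustrative sketch):
  //   store i32 %x, i32* %P
  //   %t = add i32 %a, %b
  //   %y = load i32* %P        ; forwarded: %y is replaced by %x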
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(),
                                                     BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load (gep null, ...) -> unreachable
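  //
  // E.g. (illustrative) a load such as:
  //   %p = getelementptr i32* null, i32 4
  //   %v = load i32* %p
  // is replaced by undef, plus a store to null that SimplifyCFG later turns
  // into 'unreachable'.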
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
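///
/// For example (an illustrative sketch, assuming a 32-bit target):
///   %c = bitcast i8** %P to i32*
///   store i32 %x, i32* %c
/// becomes:
///   %x.c = inttoptr i32 %x to i8*
///   store i8* %x.c, i8** %P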
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  Type *CastSrcTy = SIOp0->getType();
  Type *CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // If the cast'd pointer is to an aggregate, this is a store to its first
  // field; emit a GEP to index down to that field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr @a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr @a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return 0;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
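  //
  // For example (an illustrative sketch):
  //   store i32 %a, i32* %P
  //   %t = or i32 %x, %y
  //   store i32 %b, i32* %P    ; the first store is dead and is removed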
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is the
    // value we are storing and it was loaded from the same pointer, then *this*
    // store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // are not dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
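///
/// The result for the first pattern looks roughly like this (an illustrative
/// sketch, not verbatim compiler output):
///   Then:                                  ; store to %P removed
///     br label %Cont
///   Else:                                  ; store to %P removed
///     br label %Cont
///   Cont:
///     %storemerge = phi i32 [ %v1, %Then ], [ %v2, %Else ]
///     store i32 %storemerge, i32* %P
///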
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;
  if (P != StoreBB)
    OtherBB = P;
  if (++PI == pred_end(DestBB))
    return false;
  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}