//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}
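
// For illustration (hypothetical IR): given
//   @G = internal constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
// the constant expression getelementptr([4 x i32]* @G, i64 0, i64 2) points
// to a constant global, whereas a pointer computed by an ordinary
// (non-constant) instruction is rejected by the "constant expressions only"
// restriction above.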

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
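///
/// A sketch of the pattern (illustrative IR, names hypothetical):
///   @G = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   %A = alloca [4 x i32]
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %g, i64 16, i32 4,
///                                        i1 false)  ; %a/%g bitcast %A/@G
/// If %A is otherwise only read, all uses of %A can be rewritten to use @G.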
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global.  If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
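  // For instance (illustrative, names hypothetical):
  //   %buf = alloca i32, i32 10
  // becomes
  //   %buf = alloca [10 x i32]
  //   %buf.sub = getelementptr inbounds [10 x i32]* %buf, i64 0, i64 0
  // and all users of the old alloca are pointed at %buf.sub.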
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    BasicBlock::iterator It = New;
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.ReplaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
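///
/// For example (illustrative IR, names hypothetical): with \c NewTy == i64, a
/// "load double* %p" becomes
///   %p.cast = bitcast double* %p to i64*
///   %v = load i64* %p.cast
/// with the original alignment and (translatable) metadata carried over.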
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;
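
      // Illustratively: on typical targets where null is 0, a !nonnull load
      // rewritten as i64 gets !range !{i64 1, i64 0} -- the wrapping range
      // that admits every value except zero (null).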

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard. If the new type is a pointer, we could
      // translate it to !nonnull metadata.
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
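///
/// This is the store-side counterpart of \c combineLoadToNewType: it casts
/// the pointer operand to match the new value's type and re-emits the store,
/// preserving whatever metadata still applies.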
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
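///
/// As a sketch (illustrative IR, names hypothetical), a pattern like
///   %x = load float* %p
///   %y = bitcast float %x to i32
/// is rewritten to load the i32 directly:
///   %p.cast = bitcast float* %p to i32*
///   %y = load i32* %p.cast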
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.EraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Constant global values and allocas are the kinds of objects for
// which this can be proven.
//
// FIXME: This should probably live in ValueTracking (or similar).
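//
// For instance (illustrative): a pointer that can only be derived from
// "%buf = alloca [4 x i32]" (16 bytes) satisfies this check for any
// MaxSize >= 16, while a pointer that might come from a function argument
// never does.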
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->mayBeOverridden())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getType()->getElementType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getOperand(0)->getType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }

  return nullptr;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
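///
/// As a sketch (illustrative IR, names hypothetical), this turns:
///   %f = bitcast i32 %x to float
///   store float %f, float* %p
/// into:
///   %p.cast = bitcast float* %p to i32*
///   store i32 %x, i32* %p.cast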
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // stores here, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return EraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the pointer operand is an alloca with a single use, zapify the store,
  // making the alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
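  //
  // Illustrative IR (names hypothetical):
  //   store i32 %a, i32* %p
  //   %b = or i32 %a, 1
  //   store i32 %b, i32* %p    ; the earlier store is dead and is removed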
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is the
    // value being stored and it was loaded from the pointer we're storing to
    // (X = load P; store X -> P), then *this* store is dead.
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  We know there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}