//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
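//
// For example (illustrative): an internal global that is written once during
// static initialization and only read afterwards can be marked 'constant' so
// that loads of it fold away, while an internal global that is only ever
// stored to can be deleted outright.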
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
// The code below uses std::vector and std::pair directly.
#include <utility>
#include <vector>
using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");

namespace {

struct GlobalOpt : public ModulePass {
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
  }
  static char ID; // Pass identification, replacement for typeid
  GlobalOpt() : ModulePass(ID) {
    initializeGlobalOptPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

private:
  bool OptimizeFunctions(Module &M);
  bool OptimizeGlobalVars(Module &M);
  bool OptimizeGlobalAliases(Module &M);
  bool deleteIfDead(GlobalValue &GV);
  bool processGlobal(GlobalValue &GV);
  bool processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS);
  bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);

  bool isPointerValueDeadOnEntryToFunction(const Function *F,
                                           GlobalValue *GV);

  TargetLibraryInfo *TLI;
  SmallSet<const Comdat *, 8> NotDiscardableComdats;
};

} // end anonymous namespace

char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }

/// Is this global variable possibly used by a leak checker as a root?  If so,
/// we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.
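  //
  // For example (illustrative): a frontend may lower
  //   union { long l; void *p; } u;
  // to '@u = global i64 0', so an i64 global can still hold a pointer and
  // must be treated as a possible root.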

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(cast<PointerType>(GV->getType())->getElementType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
    default: break;
    case Type::PointerTyID: return true;
    case Type::ArrayTyID:
    case Type::VectorTyID: {
      SequentialType *STy = cast<SequentialType>(Ty);
      Types.push_back(STy->getElementType());
      break;
    }
    case Type::StructTyID: {
      StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
      for (StructType::element_iterator I = STy->element_begin(),
           E = STy->element_end(); I != E; ++I) {
        Type *InnerTy = *I;
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<CompositeType>(InnerTy))
          Types.push_back(InnerTy);
      }
      break;
    }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V)) return true;
    if (!V->hasOneUse()) return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI)) return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects()) return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (1);
}

/// This GV is a pointer root.  Loop over all users of the global and clean up
/// any that obviously don't assign the global a dynamically-allocated value.
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals.  To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
    }
  }

  return Changed;
}

/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a change.
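//
// For example (illustrative): once @g is known to be constant with
// initializer 'i32 7', each 'load i32, i32* @g' can be replaced by 'i32 7'
// and erased, and dead stores or dead constantexprs over @g are removed.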
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout &DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getType()->getElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}

/// Return true if the specified instruction is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (!GEPI) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (User *U : GEPI->users())
    if (!isSafeSROAElementUse(U))
      return false;
  return true;
}

/// U is a direct user of the specified global value.  Look at it and its uses
/// and decide whether it is safe to SROA this global.
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // constant C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (User *UU : U->users())
    if (!isSafeSROAElementUse(UU))
      return false;

  return true;
}

/// Look at all uses of the global and decide whether it is safe for us to
/// perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users())
    if (!IsUserOfGlobalSafeForSRA(U, GV))
      return false;

  return true;
}

/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way.  We have determined that this
/// transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
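//
// For example (illustrative):
//   @g = internal global { i32, [4 x float] } { ... }
// is split into
//   @g.0 = internal global i32 ...
//   @g.1 = internal global [4 x float] ...
// and every 'gep @g, 0, C, ...' user is rewritten against the new globals.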
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
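      // (For example, if the aggregate is 16-byte aligned and this field is
      // at offset 4, the field is only guaranteed MinAlign(16, 4) == 4.)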
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return nullptr; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps,
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore operand 1, which has to be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];
    Type *NewTy = NewGlobals[Val]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
          ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}

/// Return true if all users of the specified value will trap if the value is
/// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
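//
// For example (illustrative): if %p is only used as 'load %p', 'store X, %p',
// or as the callee of a call, then every use dereferences %p, so executing
// any of them with %p == null is guaranteed to trap.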
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users())
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }

  return true;
}

/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null.  Note that this also permits comparisons of the loaded value
/// against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(nullptr, NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that they cannot be reachable with a null
/// value, and we can optimize away the load.
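//
// For example (illustrative): if @fp is initialized to null and the only
// value ever stored to it is a known function @f, every indirect call through
// 'load @fp' can be turned into a direct call to @f.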
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout &DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                 << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
    }
    if (GV->use_empty()) {
      DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and
/// rewrite loads of GV as uses of the new global.
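//
// For example (illustrative):
//   @g = internal global i32* null
//   store i32* %(malloc result), i32** @g       ; the only store to @g
// becomes
//   @g.body = internal global i32 undef         ; the promoted allocation
//   @g.init = internal global i1 false          ; set when the store executes
// with loads of @g rewritten to use @g.body, and 'icmp (load @g), null'
// tests rewritten in terms of @g.init.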
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSynchScope(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the malloc..
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// Scan the use-list of V checking to make sure that there are no complex uses
/// of V.  We permit simple things like dereferencing the pointer, but not
/// storing through the address, unless it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// The Alloc pointer is stored into GV somewhere.  Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
/// perform heap SRA on.  This permits GEP's that index through the array and
/// struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

/// If all users of values loaded from GV are simple enough to perform HeapSRA,
/// return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or the stored value itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
      PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// We are performing Heap SRoA on a global.  Ptr is a value loaded from the
/// global.  Eliminate all uses of Ptr, making them use FieldGlobals instead.
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// CI is an allocation of an array of structures.  Break it up into multiple
/// allocations of arrays of the fields.
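//
// For example (illustrative):
//   %m = malloc(%n x { i32, i32* })             ; stored into @g
// becomes one malloc and one global per field:
//   @g.f0 -> malloc(%n x i32)
//   @g.f1 -> malloc(%n x i32*)
// so 'gep %p, %i, 1' style accesses become accesses off a load of @g.f1.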
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;
       ++FieldNo) {
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  BI);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  // As we process loads, if we can't immediately update all uses of the load,
  // keep track of what scalarized loads are inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// This function is called when we see a pointer global variable with a single
/// value stored into it that is a malloc or cast of malloc.
static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               const DataLayout &DL,
                                               TargetLibraryInfo *TLI) {
  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded icmp'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine malloc array size.
  Value *NElems = getMallocArraySize(CI, DL, TLI, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
      OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.

  if (Ordering != NotAtomic)
    return false;

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If this structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs.  malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
      Type *IntPtrTy = DL.getIntPtrType(CI->getType());
      unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
                                                   AllocSize, NumElements,
                                                   nullptr, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
        CI = cast<CallInst>(BCI->getOperand(0));
      else
        CI = cast<CallInst>(Malloc);
    }

    PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
                         TLI);
    return true;
  }

  return false;
}

// Try to optimize globals based on the knowledge that only one value (besides
// its initializer) is ever stored to the global.
static bool optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     AtomicOrdering Ordering,
                                     const DataLayout &DL,
                                     TargetLibraryInfo *TLI) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue()) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
        return true;
    } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
      Type *MallocType = getMallocAllocatedType(CI, TLI);
      if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
                                                           Ordering, DL, TLI))
        return true;
    }
  }

  return false;
}

/// At this point, we have learned that the only two values ever stored into GV
/// are its initializer and OtherVal.  See if we can shrink the global into a
/// boolean and select between the two values whenever it is used.  This exposes
/// the values to other scalar optimizations.
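//
// For example (illustrative): if only 'i32 0' (the initializer) and 'i32 42'
// (OtherVal) are ever stored to @g, @g becomes an i1 global; stores write
// 'true' for 42 and 'false' for 0, and each load becomes
//   %b = load i1, i1* @g.b
//   %v = select i1 %b, i32 42, i32 0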
1585 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1586 Type *GVElType = GV->getType()->getElementType();
1588 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1589 // an FP value, pointer or vector, don't do this optimization because a select
1590 // between them is very expensive and unlikely to lead to later
1591 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1592 // where v1 and v2 both require constant pool loads, a big loss.
1593 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1594 GVElType->isFloatingPointTy() ||
1595 GVElType->isPointerTy() || GVElType->isVectorTy())
1596 return false;
1598 // Walk the use list of the global seeing if all the uses are load or store.
1599 // If there is anything else, bail out.
1600 for (User *U : GV->users())
1601 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1602 return false;
1604 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
1606 // Create the new global, initializing it to false.
1607 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1608 false,
1609 GlobalValue::InternalLinkage,
1610 ConstantInt::getFalse(GV->getContext()),
1611 GV->getName()+".b",
1612 GV->getThreadLocalMode(),
1613 GV->getType()->getAddressSpace());
1614 GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);
1616 Constant *InitVal = GV->getInitializer();
1617 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1618 "No reason to shrink to bool!");
1620 // If initialized to zero and storing one into the global, we can use a cast
1621 // instead of a select to synthesize the desired value.
1622 bool IsOneZero = false;
1623 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
1624 IsOneZero = InitVal->isNullValue() && CI->isOne();
1626 while (!GV->use_empty()) {
1627 Instruction *UI = cast<Instruction>(GV->user_back());
1628 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1629 // Change the store into a boolean store.
1630 bool StoringOther = SI->getOperand(0) == OtherVal;
1631 // Only do this if we weren't storing a loaded value.
1632 Value *StoreVal;
1633 if (StoringOther || SI->getOperand(0) == InitVal) {
1634 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1635 StoringOther);
1636 } else {
1637 // Otherwise, we are storing a previously loaded copy. To do this,
1638 // change the copy from copying the original value to just copying the
1639 // bool.
1640 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1642 // If we've already replaced the input, StoredVal will be a cast or
1643 // select instruction. If not, it will be a load of the original
1644 // global.
1645 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1646 assert(LI->getOperand(0) == GV && "Not a copy!");
1647 // Insert a new load, to preserve the saved value.
1648 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1649 LI->getOrdering(), LI->getSynchScope(), LI);
1650 } else {
1651 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1652 "This is not a form that we understand!");
1653 StoreVal = StoredVal->getOperand(0);
1654 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1655 }
1656 }
1657 new StoreInst(StoreVal, NewGV, false, 0,
1658 SI->getOrdering(), SI->getSynchScope(), SI);
1659 } else {
1660 // Change the load into a load of bool then a select.
1661 LoadInst *LI = cast<LoadInst>(UI);
1662 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1663 LI->getOrdering(), LI->getSynchScope(), LI);
1664 Value *NSI;
1665 if (IsOneZero)
1666 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1667 else
1668 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1669 NSI->takeName(LI);
1670 LI->replaceAllUsesWith(NSI);
1671 }
1672 UI->eraseFromParent();
1673 }
1675 // Retain the name of the old global variable. People who are debugging their
1676 // programs may expect these variables to be named the same.
1677 NewGV->takeName(GV);
1678 GV->eraseFromParent();
1679 return true;
1680 }
1682 bool GlobalOpt::deleteIfDead(GlobalValue &GV) {
1683 GV.removeDeadConstantUsers();
1685 if (!GV.isDiscardableIfUnused())
1686 return false;
1688 if (const Comdat *C = GV.getComdat())
1689 if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
1690 return false;
1692 bool Dead;
1693 if (auto *F = dyn_cast<Function>(&GV))
1694 Dead = F->isDefTriviallyDead();
1695 else
1696 Dead = GV.use_empty();
1697 if (!Dead)
1698 return false;
1700 DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
1701 GV.eraseFromParent();
1702 ++NumDeleted;
1703 return true;
1704 }
1706 /// Analyze the specified global variable and optimize it if possible. If we
1707 /// make a change, return true.
1708 bool GlobalOpt::processGlobal(GlobalValue &GV) {
1709 // Do more involved optimizations if the global is internal.
1710 if (!GV.hasLocalLinkage())
1711 return false;
1713 GlobalStatus GS;
1715 if (GlobalStatus::analyzeGlobal(&GV, GS))
1716 return false;
1718 bool Changed = false;
1719 if (!GS.IsCompared && !GV.hasUnnamedAddr()) {
1720 GV.setUnnamedAddr(true);
1721 ++NumUnnamed;
1722 Changed = true;
1723 }
1725 auto *GVar = dyn_cast<GlobalVariable>(&GV);
1726 if (!GVar)
1727 return Changed;
1729 if (GVar->isConstant() || !GVar->hasInitializer())
1730 return Changed;
1732 return processInternalGlobal(GVar, GS) || Changed;
1733 }
1735 bool GlobalOpt::isPointerValueDeadOnEntryToFunction(const Function *F, GlobalValue *GV) {
1736 // Find all uses of GV. We expect them all to be in F, and if we can't
1737 // identify any of the uses we bail out.
1739 // On each of these uses, identify if the memory that GV points to is
1740 // used/required/live at the start of the function. If it is not, for example
1741 // if the first thing the function does is store to the GV, the GV can
1742 // possibly be demoted.
1744 // We don't do an exhaustive search for memory operations - simply look
1745 // through bitcasts as they're quite common and benign.
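// For instance (hypothetical IR), in
//   define void @f() { store i32 1, i32* @g ; ... loads of @g follow ... }
// every load of @g is dominated by a store, so the memory @g points to is
// dead on entry to @f and the global is a candidate for demotion to an
// alloca.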
1746 const DataLayout &DL = GV->getParent()->getDataLayout();
1747 SmallVector<LoadInst *, 4> Loads;
1748 SmallVector<StoreInst *, 4> Stores;
1749 for (auto *U : GV->users()) {
1750 if (Operator::getOpcode(U) == Instruction::BitCast) {
1751 for (auto *UU : U->users()) {
1752 if (auto *LI = dyn_cast<LoadInst>(UU))
1753 Loads.push_back(LI);
1754 else if (auto *SI = dyn_cast<StoreInst>(UU))
1755 Stores.push_back(SI);
1756 else
1757 return false;
1758 }
1759 continue;
1760 }
1762 Instruction *I = dyn_cast<Instruction>(U);
1763 if (!I)
1764 return false;
1765 assert(I->getParent()->getParent() == F);
1767 if (auto *LI = dyn_cast<LoadInst>(I))
1768 Loads.push_back(LI);
1769 else if (auto *SI = dyn_cast<StoreInst>(I))
1770 Stores.push_back(SI);
1771 else
1772 return false;
1773 }
1775 // We have identified all uses of GV into loads and stores. Now check if all
1776 // of them are known not to depend on the value of the global at the function
1777 // entry point. We do this by ensuring that every load is dominated by at
1778 // least one store.
1779 auto &DT = getAnalysis<DominatorTreeWrapperPass>(*const_cast<Function *>(F))
1780 .getDomTree();
1782 // The below check is quadratic. Check we're not going to do too many tests.
1783 // FIXME: Even though this will always have worst-case quadratic time, we
1784 // could put effort into minimizing the average time by putting stores that
1785 // have been shown to dominate at least one load at the beginning of the
1786 // Stores array, making subsequent dominance checks more likely to succeed
1787 // early.
1789 // The threshold here is fairly large because global->local demotion is a
1790 // very powerful optimization should it fire.
1791 const unsigned Threshold = 100;
1792 if (Loads.size() * Stores.size() > Threshold)
1793 return false;
1795 for (auto *L : Loads) {
1796 auto *LTy = L->getType();
1797 if (!std::any_of(Stores.begin(), Stores.end(), [&](StoreInst *S) {
1798 auto *STy = S->getValueOperand()->getType();
1799 // The load is only dominated by the store if DomTree says so
1800 // and the number of bits loaded in L is less than or equal to
1801 // the number of bits stored in S.
1802 return DT.dominates(S, L) &&
1803 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
1804 }))
1805 return false;
1806 }
1808 return true;
1809 }
1811 /// C may have non-instruction users. Can all of those users be turned into
1812 /// instructions?
1813 static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
1814 // We don't do this exhaustively. The most common pattern that we really need
1815 // to care about is a constant GEP or constant bitcast - so just looking
1816 // through one single ConstantExpr.
1818 // The set of constants that this function returns true for must be able to be
1819 // handled by makeAllConstantUsesInstructions.
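// The pattern this aims at (illustrative): a ConstantExpr such as
//   getelementptr inbounds ([4 x i32], [4 x i32]* @g, i32 0, i32 1)
// whose users are all instructions; such an expression can later be
// rematerialized as a real GetElementPtrInst by
// makeAllConstantUsesInstructions.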
1820 for (auto *U : C->users()) {
1821 if (isa<Instruction>(U))
1822 continue;
1823 if (!isa<ConstantExpr>(U))
1824 // Non-instruction, non-ConstantExpr user; cannot convert this.
1825 return false;
1826 for (auto *UU : U->users())
1827 if (!isa<Instruction>(UU))
1828 // A ConstantExpr used by another constant. We don't try to recurse any
1829 // further but just bail out at this point.
1830 return false;
1831 }
1833 return true;
1834 }
1836 /// C may have non-instruction users, and
1837 /// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the
1838 /// non-instruction users to instructions.
1839 static void makeAllConstantUsesInstructions(Constant *C) {
1840 SmallVector<ConstantExpr*,4> Users;
1841 for (auto *U : C->users()) {
1842 if (isa<ConstantExpr>(U))
1843 Users.push_back(cast<ConstantExpr>(U));
1844 else
1845 // We should never get here; allNonInstructionUsersCanBeMadeInstructions
1846 // should not have returned true for C.
1847 assert(
1848 isa<Instruction>(U) &&
1849 "Can't transform non-constantexpr non-instruction to instruction!");
1850 }
1852 SmallVector<Value*,4> UUsers;
1853 for (auto *U : Users) {
1854 UUsers.clear();
1855 for (auto *UU : U->users())
1856 UUsers.push_back(UU);
1857 for (auto *UU : UUsers) {
1858 Instruction *UI = cast<Instruction>(UU);
1859 Instruction *NewU = U->getAsInstruction();
1860 NewU->insertBefore(UI);
1861 UI->replaceUsesOfWith(U, NewU);
1862 }
1863 U->dropAllReferences();
1864 }
1865 }
1867 /// Analyze the specified global variable and optimize
1868 /// it if possible. If we make a change, return true.
1869 bool GlobalOpt::processInternalGlobal(GlobalVariable *GV,
1870 const GlobalStatus &GS) {
1871 auto &DL = GV->getParent()->getDataLayout();
1872 // If this is a first class global and has only one accessing function and
1873 // this function is non-recursive, we replace the global with a local alloca
1874 // in this function.
1876 // NOTE: It doesn't make sense to promote non-single-value types since we
1877 // are just replacing static memory with stack memory.
1879 // If the global is in a different address space, don't bring it to stack.
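// E.g. (hypothetical IR): an "@g = internal global i32 0" that is only
// accessed from a single non-recursive function can be replaced by
//   %g = alloca i32
// in that function's entry block, with the initializer stored to it first
// unless the initializer is undef.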
1880 if (!GS.HasMultipleAccessingFunctions &&
1881 GS.AccessingFunction &&
1882 GV->getType()->getElementType()->isSingleValueType() &&
1883 GV->getType()->getAddressSpace() == 0 &&
1884 !GV->isExternallyInitialized() &&
1885 allNonInstructionUsersCanBeMadeInstructions(GV) &&
1886 GS.AccessingFunction->doesNotRecurse() &&
1887 isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV) ) {
1888 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
1889 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1890 ->getEntryBlock().begin());
1891 Type *ElemTy = GV->getType()->getElementType();
1892 // FIXME: Pass Global's alignment when globals have alignment
1893 AllocaInst *Alloca = new AllocaInst(ElemTy, nullptr,
1894 GV->getName(), &FirstI);
1895 if (!isa<UndefValue>(GV->getInitializer()))
1896 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1898 makeAllConstantUsesInstructions(GV);
1900 GV->replaceAllUsesWith(Alloca);
1901 GV->eraseFromParent();
1902 ++NumLocalized;
1903 return true;
1904 }
1906 // If the global is never loaded (but may be stored to), it is dead.
1907 // Delete it now.
1908 if (!GS.IsLoaded) {
1909 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
1911 bool Changed;
1912 if (isLeakCheckerRoot(GV)) {
1913 // Delete any constant stores to the global.
1914 Changed = CleanupPointerRootUsers(GV, TLI);
1915 } else {
1916 // Delete any stores we can find to the global. We may not be able to
1917 // make it completely dead though.
1918 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1919 }
1921 // If the global is dead now, delete it.
1922 if (GV->use_empty()) {
1923 GV->eraseFromParent();
1924 ++NumDeleted;
1925 Changed = true;
1926 }
1927 return Changed;
1929 } else if (GS.StoredType <= GlobalStatus::InitializerStored) {
1930 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
1931 GV->setConstant(true);
1933 // Clean up any obviously simplifiable users now.
1934 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1936 // If the global is dead now, just nuke it.
1937 if (GV->use_empty()) {
1938 DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
1939 << "all users and delete global!\n");
1940 GV->eraseFromParent();
1941 ++NumDeleted;
1942 }
1944 ++NumMarked;
1945 return true;
1946 } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
1947 const DataLayout &DL = GV->getParent()->getDataLayout();
1948 if (SRAGlobal(GV, DL))
1949 return true;
1950 } else if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) {
1951 // If the initial value for the global was an undef value, and if only
1952 // one other value was stored into it, we can just change the
1953 // initializer to be the stored value, then delete all stores to the
1954 // global. This allows us to mark it constant.
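// Illustrative IR: given
//   @g = internal global i32 undef
// and a single "store i32 42, i32* @g", the store can be removed after
// rewriting the definition to
//   @g = internal global i32 42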
1955 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
1956 if (isa<UndefValue>(GV->getInitializer())) {
1957 // Change the initial value here.
1958 GV->setInitializer(SOVConstant);
1960 // Clean up any obviously simplifiable users now.
1961 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1963 if (GV->use_empty()) {
1964 DEBUG(dbgs() << " *** Substituting initializer allowed us to "
1965 << "simplify all users and delete global!\n");
1966 GV->eraseFromParent();
1967 ++NumDeleted;
1968 }
1969 ++NumSubstitute;
1970 return true;
1971 }
1973 // Try to optimize globals based on the knowledge that only one value
1974 // (besides its initializer) is ever stored to the global.
1975 if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, TLI))
1976 return true;
1978 // Otherwise, if the global was not a boolean, we can shrink it to be a
1979 // boolean.
1980 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
1981 if (GS.Ordering == NotAtomic) {
1982 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
1983 ++NumShrunkToBool;
1984 return true;
1985 }
1986 }
1987 }
1988 }
1990 return false;
1991 }
1993 /// Walk all of the direct calls of the specified function, changing them to
1994 /// FastCC.
1995 static void ChangeCalleesToFastCall(Function *F) {
1996 for (User *U : F->users()) {
1997 if (isa<BlockAddress>(U))
1998 continue;
1999 CallSite CS(cast<Instruction>(U));
2000 CS.setCallingConv(CallingConv::Fast);
2001 }
2002 }
2004 static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) {
2005 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
2006 unsigned Index = Attrs.getSlotIndex(i);
2007 if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest))
2008 continue;
2010 // There can be only one.
2011 return Attrs.removeAttribute(C, Index, Attribute::Nest);
2012 }
2014 return Attrs;
2015 }
2017 static void RemoveNestAttribute(Function *F) {
2018 F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
2019 for (User *U : F->users()) {
2020 if (isa<BlockAddress>(U))
2021 continue;
2022 CallSite CS(cast<Instruction>(U));
2023 CS.setAttributes(StripNest(F->getContext(), CS.getAttributes()));
2024 }
2025 }
2027 /// Return true if this is a calling convention that we'd like to change. The
2028 /// idea here is that we don't want to mess with the convention if the user
2029 /// explicitly requested something with performance implications like coldcc,
2030 /// GHC, or anyregcc.
2031 static bool isProfitableToMakeFastCC(Function *F) {
2032 CallingConv::ID CC = F->getCallingConv();
2033 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
2034 return CC == CallingConv::C || CC == CallingConv::X86_ThisCall;
2035 }
2037 bool GlobalOpt::OptimizeFunctions(Module &M) {
2038 bool Changed = false;
2039 // Optimize functions.
2040 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2041 Function *F = &*FI++;
2042 // Functions without names cannot be referenced outside this module.
2043 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
2044 F->setLinkage(GlobalValue::InternalLinkage);
2046 if (deleteIfDead(*F)) {
2047 Changed = true;
2048 continue;
2049 }
2051 Changed |= processGlobal(*F);
2053 if (!F->hasLocalLinkage())
2054 continue;
2055 if (isProfitableToMakeFastCC(F) && !F->isVarArg() &&
2056 !F->hasAddressTaken()) {
2057 // If this function has a calling convention worth changing, is not a
2058 // varargs function, and is only called directly, promote it to use the
2059 // Fast calling convention.
2060 F->setCallingConv(CallingConv::Fast);
2061 ChangeCalleesToFastCall(F);
2062 ++NumFastCallFns;
2063 Changed = true;
2064 }
2066 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
2067 !F->hasAddressTaken()) {
2068 // The function is not used by a trampoline intrinsic, so it is safe
2069 // to remove the 'nest' attribute.
2070 RemoveNestAttribute(F);
2071 ++NumNestRemoved;
2072 Changed = true;
2073 }
2074 }
2076 return Changed;
2077 }
2078 bool GlobalOpt::OptimizeGlobalVars(Module &M) {
2079 bool Changed = false;
2081 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
2082 GVI != E; ) {
2083 GlobalVariable *GV = &*GVI++;
2084 // Global variables without names cannot be referenced outside this module.
2085 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
2086 GV->setLinkage(GlobalValue::InternalLinkage);
2087 // Simplify the initializer.
2088 if (GV->hasInitializer())
2089 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
2090 auto &DL = M.getDataLayout();
2091 Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
2092 if (New && New != CE)
2093 GV->setInitializer(New);
2094 }
2096 if (deleteIfDead(*GV)) {
2097 Changed = true;
2098 continue;
2099 }
2101 Changed |= processGlobal(*GV);
2102 }
2104 return Changed;
2105 }
2106 static bool
2107 isSimpleEnoughValueToCommit(Constant *C,
2108 SmallPtrSetImpl<Constant *> &SimpleConstants,
2109 const DataLayout &DL);
2111 /// Return true if the specified constant can be handled by the code generator.
2112 /// We don't want to generate something like:
2113 /// void *X = &X/42;
2114 /// because the code generator doesn't have a relocation that can handle that.
2116 /// This function should be called if C was not found (but just got inserted)
2117 /// in SimpleConstants to avoid having to rescan the same constants all the
2118 /// time.
2119 static bool
2120 isSimpleEnoughValueToCommitHelper(Constant *C,
2121 SmallPtrSetImpl<Constant *> &SimpleConstants,
2122 const DataLayout &DL) {
2123 // Simple global addresses are supported, do not allow dllimport or
2124 // thread-local globals.
2125 if (auto *GV = dyn_cast<GlobalValue>(C))
2126 return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();
2128 // Simple integer, undef, constant aggregate zero, etc are all supported.
2129 if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
2130 return true;
2132 // Aggregate values are safe if all their elements are.
2133 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
2134 isa<ConstantVector>(C)) {
2135 for (Value *Op : C->operands())
2136 if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
2137 return false;
2138 return true;
2139 }
2141 // We don't know exactly what relocations are allowed in constant expressions,
2142 // so we allow &global+constantoffset, which is safe and uniformly supported
2143 // across targets.
2144 ConstantExpr *CE = cast<ConstantExpr>(C);
2145 switch (CE->getOpcode()) {
2146 case Instruction::BitCast:
2147 // Bitcast is fine if the casted value is fine.
2148 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2150 case Instruction::IntToPtr:
2151 case Instruction::PtrToInt:
2152 // int <=> ptr is fine if the int type is the same size as the
2153 // pointer type.
2154 if (DL.getTypeSizeInBits(CE->getType()) !=
2155 DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
2156 return false;
2157 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2159 // GEP is fine if it is simple + constant offset.
2160 case Instruction::GetElementPtr:
2161 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
2162 if (!isa<ConstantInt>(CE->getOperand(i)))
2163 return false;
2164 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2166 case Instruction::Add:
2167 // We allow simple+cst.
2168 if (!isa<ConstantInt>(CE->getOperand(1)))
2169 return false;
2170 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2171 }
2172 return false;
2173 }
2175 static bool
2176 isSimpleEnoughValueToCommit(Constant *C,
2177 SmallPtrSetImpl<Constant *> &SimpleConstants,
2178 const DataLayout &DL) {
2179 // If we already checked this constant, we win.
2180 if (!SimpleConstants.insert(C).second)
2181 return true;
2182 // Check the constant.
2183 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
2184 }
2187 /// Return true if this constant is simple enough for us to understand. In
2188 /// particular, if it is a cast to anything other than from one pointer type to
2189 /// another pointer type, we punt. We basically just support direct accesses to
2190 /// globals and GEP's of globals. This should be kept up to date with
2191 /// CommitValueTo.
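// Acceptable pointers look like (illustrative IR):
//   @g                                                  ; global itself
//   getelementptr inbounds (%T, %T* @g, i32 0, i32 1)   ; constant in-bounds GEP
//   bitcast (%T* @g to %U*)                             ; ptr-to-ptr bitcast
// Globals without a unique initializer (weak/linkonce/external) and GEPs
// with non-constant or out-of-bounds indices are rejected.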
2192 static bool isSimpleEnoughPointerToCommit(Constant *C) {
2193 // Conservatively, avoid aggregate types. This is because we don't
2194 // want to worry about them partially overlapping other stores.
2195 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
2196 return false;
2198 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
2199 // Do not allow weak/*_odr/linkonce linkage or external globals.
2200 return GV->hasUniqueInitializer();
2202 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
2203 // Handle a constantexpr gep.
2204 if (CE->getOpcode() == Instruction::GetElementPtr &&
2205 isa<GlobalVariable>(CE->getOperand(0)) &&
2206 cast<GEPOperator>(CE)->isInBounds()) {
2207 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2208 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2209 // external globals.
2210 if (!GV->hasUniqueInitializer())
2211 return false;
2213 // The first index must be zero.
2214 ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin()));
2215 if (!CI || !CI->isZero()) return false;
2217 // The remaining indices must be compile-time known integers within the
2218 // notional bounds of the corresponding static array types.
2219 if (!CE->isGEPWithNoNotionalOverIndexing())
2220 return false;
2222 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2224 // A constantexpr bitcast from a pointer to another pointer is a no-op,
2225 // and we know how to evaluate it by moving the bitcast from the pointer
2226 // operand to the value operand.
2227 } else if (CE->getOpcode() == Instruction::BitCast &&
2228 isa<GlobalVariable>(CE->getOperand(0))) {
2229 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2230 // external globals.
2231 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
2232 }
2233 }
2235 return false;
2236 }
2238 /// Evaluate a piece of a constantexpr store into a global initializer. This
2239 /// returns 'Init' modified to reflect 'Val' stored into it. At this point, the
2240 /// GEP operands of Addr [0, OpNo) have been stepped into.
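// Worked example (illustrative): storing i32 7 through
//   getelementptr (%S, %S* @g, i32 0, i32 1)
// into @g = global %S { i32 1, i32 2 } starts at OpNo == 2, rebuilds the
// struct element-wise, and yields the new initializer %S { i32 1, i32 7 }.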
2241 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2242 ConstantExpr *Addr, unsigned OpNo) {
2243 // Base case of the recursion.
2244 if (OpNo == Addr->getNumOperands()) {
2245 assert(Val->getType() == Init->getType() && "Type mismatch!");
2246 return Val;
2247 }
2249 SmallVector<Constant*, 32> Elts;
2250 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2251 // Break up the constant into its elements.
2252 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2253 Elts.push_back(Init->getAggregateElement(i));
2255 // Replace the element that we are supposed to.
2256 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2257 unsigned Idx = CU->getZExtValue();
2258 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2259 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2261 // Return the modified struct.
2262 return ConstantStruct::get(STy, Elts);
2263 }
2265 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2266 SequentialType *InitTy = cast<SequentialType>(Init->getType());
2268 uint64_t NumElts;
2269 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
2270 NumElts = ATy->getNumElements();
2271 else
2272 NumElts = InitTy->getVectorNumElements();
2274 // Break up the array into elements.
2275 for (uint64_t i = 0, e = NumElts; i != e; ++i)
2276 Elts.push_back(Init->getAggregateElement(i));
2278 assert(CI->getZExtValue() < NumElts);
2279 Elts[CI->getZExtValue()] =
2280 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2282 if (Init->getType()->isArrayTy())
2283 return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2284 return ConstantVector::get(Elts);
2285 }
2287 /// We have decided that Addr (which satisfies the predicate
2288 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2289 static void CommitValueTo(Constant *Val, Constant *Addr) {
2290 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2291 assert(GV->hasInitializer());
2292 GV->setInitializer(Val);
2293 return;
2294 }
2296 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2297 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2298 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2299 }
2301 namespace {
2303 /// This class evaluates LLVM IR, producing the Constant representing each SSA
2304 /// instruction. Changes to global variables are stored in a mapping that can
2305 /// be iterated over after the evaluation is complete. Once an evaluation call
2306 /// fails, the evaluation object should not be reused.
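// Typical use, mirroring EvaluateStaticConstructor further below (sketch):
//   Evaluator Eval(DL, TLI);
//   Constant *RetVal;
//   SmallVector<Constant *, 0> Args;
//   if (Eval.EvaluateFunction(F, RetVal, Args))
//     for (const auto &KV : Eval.getMutatedMemory())
//       CommitValueTo(KV.second, KV.first);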
2307 class Evaluator {
2308 public:
2309 Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
2310 : DL(DL), TLI(TLI) {
2311 ValueStack.emplace_back();
2312 }
2314 ~Evaluator() {
2315 for (auto &Tmp : AllocaTmps)
2316 // If there are still users of the alloca, the program is doing something
2317 // silly, e.g. storing the address of the alloca somewhere and using it
2318 // later. Since this is undefined, we'll just make it be null.
2319 if (!Tmp->use_empty())
2320 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
2321 }
2323 /// Evaluate a call to function F, returning true if successful, false if we
2324 /// can't evaluate it. ActualArgs contains the formal arguments for the
2325 /// function.
2326 bool EvaluateFunction(Function *F, Constant *&RetVal,
2327 const SmallVectorImpl<Constant*> &ActualArgs);
2329 /// Evaluate all instructions in block BB, returning true if successful, false
2330 /// if we can't evaluate it. NewBB returns the next BB that control flows
2331 /// into, or null upon return.
2332 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
2334 Constant *getVal(Value *V) {
2335 if (Constant *CV = dyn_cast<Constant>(V)) return CV;
2336 Constant *R = ValueStack.back().lookup(V);
2337 assert(R && "Reference to an uncomputed value!");
2338 return R;
2339 }
2341 void setVal(Value *V, Constant *C) {
2342 ValueStack.back()[V] = C;
2343 }
2345 const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
2346 return MutatedMemory;
2347 }
2349 const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const {
2350 return Invariants;
2351 }
2353 private:
2354 Constant *ComputeLoadResult(Constant *P);
2356 /// As we compute SSA register values, we store their contents here. The back
2357 /// of the deque contains the current function and the stack contains the
2358 /// values in the calling frames.
2359 std::deque<DenseMap<Value*, Constant*>> ValueStack;
2361 /// This is used to detect recursion. In pathological situations we could hit
2362 /// exponential behavior, but at least there is nothing unbounded.
2363 SmallVector<Function*, 4> CallStack;
2365 /// For each store we execute, we update this map. Loads check this to get
2366 /// the most up-to-date value. If evaluation is successful, this state is
2367 /// committed to the process.
2368 DenseMap<Constant*, Constant*> MutatedMemory;
2370 /// To 'execute' an alloca, we create a temporary global variable to represent
2371 /// its body. This vector is needed so we can delete the temporary globals
2372 /// when we are done.
2373 SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps;
2375 /// These global variables have been marked invariant by the static
2376 /// constructor.
2377 SmallPtrSet<GlobalVariable*, 8> Invariants;
2379 /// These are constants we have checked and know to be simple enough to live
2380 /// in a static initializer of a global.
2381 SmallPtrSet<Constant*, 8> SimpleConstants;
2383 const DataLayout &DL;
2384 const TargetLibraryInfo *TLI;
2385 };
2387 } // anonymous namespace
2389 /// Return the value that would be computed by a load from P after the stores
2390 /// reflected by 'memory' have been performed. If we can't decide, return null.
2391 Constant *Evaluator::ComputeLoadResult(Constant *P) {
2392 // If this memory location has been recently stored, use the stored value: it
2393 // is the most up-to-date.
2394 DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P);
2395 if (I != MutatedMemory.end()) return I->second;
2398 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
2399 if (GV->hasDefinitiveInitializer())
2400 return GV->getInitializer();
2401 }
2404 // Handle a constantexpr getelementptr.
2405 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
2406 if (CE->getOpcode() == Instruction::GetElementPtr &&
2407 isa<GlobalVariable>(CE->getOperand(0))) {
2408 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2409 if (GV->hasDefinitiveInitializer())
2410 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2411 }
2413 return nullptr; // don't know how to evaluate.
2414 }
2416 /// Evaluate all instructions in block BB, returning true if successful, false
2417 /// if we can't evaluate it. NewBB returns the next BB that control flows into,
2418 /// or null upon return.
2419 bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
2420 BasicBlock *&NextBB) {
2421 // This is the main evaluation loop.
2422 while (true) {
2423 Constant *InstResult = nullptr;
2425 DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
2427 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
2428 if (!SI->isSimple()) {
2429 DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n");
2430 return false; // no volatile/atomic accesses.
2432 Constant *Ptr = getVal(SI->getOperand(1));
2433 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2434 DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
2435 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2436 DEBUG(dbgs() << "; To: " << *Ptr << "\n");
2437 }
2438 if (!isSimpleEnoughPointerToCommit(Ptr)) {
2439 // If this is too complex for us to commit, reject it.
2440 DEBUG(dbgs() << "Pointer is too complex for us to evaluate store.");
2441 return false;
2442 }
2444 Constant *Val = getVal(SI->getOperand(0));
2446 // If this might be too difficult for the backend to handle (e.g. the addr
2447 // of one global variable divided by another) then we can't commit it.
2448 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
2449 DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val
2450 << "\n");
2451 return false;
2452 }
2454 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2455 if (CE->getOpcode() == Instruction::BitCast) {
2456 DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n");
2457 // If we're evaluating a store through a bitcast, then we need
2458 // to pull the bitcast off the pointer type and push it onto the
2459 // stored value.
2460 Ptr = CE->getOperand(0);
2462 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();
2464 // In order to push the bitcast onto the stored value, a bitcast
2465 // from NewTy to Val's type must be legal. If it's not, we can try
2466 // introspecting NewTy to find a legal conversion.
2467 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
2468 // If NewTy is a struct, we can convert the pointer to the struct
2469 // into a pointer to its first member.
2470 // FIXME: This could be extended to support arrays as well.
2471 if (StructType *STy = dyn_cast<StructType>(NewTy)) {
2472 NewTy = STy->getTypeAtIndex(0U);
2474 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
2475 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
2476 Constant * const IdxList[] = {IdxZero, IdxZero};
2478 Ptr = ConstantExpr::getGetElementPtr(nullptr, Ptr, IdxList);
2479 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2480 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2481 } else {
2482 // If we can't improve the situation by introspecting NewTy,
2483 // we have to give up.
2485 DEBUG(dbgs() << "Failed to bitcast constant ptr, can not "
2486 "evaluate.\n");
2487 return false;
2488 }
2489 }
2491 // If we found compatible types, go ahead and push the bitcast
2492 // onto the stored value.
2493 Val = ConstantExpr::getBitCast(Val, NewTy);
2495 DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n");
2496 }
2497 }
2499 MutatedMemory[Ptr] = Val;
2500 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
2501 InstResult = ConstantExpr::get(BO->getOpcode(),
2502 getVal(BO->getOperand(0)),
2503 getVal(BO->getOperand(1)));
2504 DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult
2505 << "\n");
2506 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
2507 InstResult = ConstantExpr::getCompare(CI->getPredicate(),
2508 getVal(CI->getOperand(0)),
2509 getVal(CI->getOperand(1)));
2510 DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult
2511 << "\n");
2512 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
2513 InstResult = ConstantExpr::getCast(CI->getOpcode(),
2514 getVal(CI->getOperand(0)),
2515 CI->getType());
2516 DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult
2517 << "\n");
2518 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
2519 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
2520 getVal(SI->getOperand(1)),
2521 getVal(SI->getOperand(2)));
2522 DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult
2523 << "\n");
2524 } else if (auto *EVI = dyn_cast<ExtractValueInst>(CurInst)) {
2525 InstResult = ConstantExpr::getExtractValue(
2526 getVal(EVI->getAggregateOperand()), EVI->getIndices());
2527 DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: " << *InstResult
2528 << "\n");
2529 } else if (auto *IVI = dyn_cast<InsertValueInst>(CurInst)) {
2530 InstResult = ConstantExpr::getInsertValue(
2531 getVal(IVI->getAggregateOperand()),
2532 getVal(IVI->getInsertedValueOperand()), IVI->getIndices());
2533 DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: " << *InstResult
2534 << "\n");
2535 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
2536 Constant *P = getVal(GEP->getOperand(0));
2537 SmallVector<Constant*, 8> GEPOps;
2538 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
2539 i != e; ++i)
2540 GEPOps.push_back(getVal(*i));
2541 InstResult =
2542 ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), P, GEPOps,
2543 cast<GEPOperator>(GEP)->isInBounds());
2544 DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult
2545 << "\n");
2546 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
2548 if (!LI->isSimple()) {
2549 DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n");
2550 return false; // no volatile/atomic accesses.
2551 }
2553 Constant *Ptr = getVal(LI->getOperand(0));
2554 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2555 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2556 DEBUG(dbgs() << "Found a constant pointer expression, constant "
2557 "folding: " << *Ptr << "\n");
2558 }
2559 InstResult = ComputeLoadResult(Ptr);
2560 if (!InstResult) {
2561 DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load."
2562 << "\n");
2563 return false; // Could not evaluate load.
2564 }
2566 DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
2567 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
2568 if (AI->isArrayAllocation()) {
2569 DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
2570 return false; // Cannot handle array allocs.
2572 Type *Ty = AI->getType()->getElementType();
2573 AllocaTmps.push_back(
2574 make_unique<GlobalVariable>(Ty, false, GlobalValue::InternalLinkage,
2575 UndefValue::get(Ty), AI->getName()));
2576 InstResult = AllocaTmps.back().get();
2577 DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
2578 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
2579 CallSite CS(&*CurInst);
2581 // Debug info can safely be ignored here.
2582 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
2583 DEBUG(dbgs() << "Ignoring debug info.\n");
2584 ++CurInst;
2585 continue;
2586 }
2588 // Cannot handle inline asm.
2589 if (isa<InlineAsm>(CS.getCalledValue())) {
2590 DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
2591 return false;
2592 }
2594 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
2595 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
2596 if (MSI->isVolatile()) {
2597 DEBUG(dbgs() << "Can not optimize a volatile memset " <<
2598 "instruction.\n");
2599 return false;
2600 }
2601 Constant *Ptr = getVal(MSI->getDest());
2602 Constant *Val = getVal(MSI->getValue());
2603 Constant *DestVal = ComputeLoadResult(getVal(Ptr));
2604 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
2605 // This memset is a no-op.
2606 DEBUG(dbgs() << "Ignoring no-op memset.\n");
2607 ++CurInst;
2608 continue;
2609 }
2610 }
2612 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
2613 II->getIntrinsicID() == Intrinsic::lifetime_end) {
2614 DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
2615 ++CurInst;
2616 continue;
2617 }
2619 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2620 // We don't insert an entry into Values, as it doesn't have a
2621 // meaningful return value.
2622 if (!II->use_empty()) {
2623 DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n");
2624 return false;
2625 }
2626 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
2627 Value *PtrArg = getVal(II->getArgOperand(1));
2628 Value *Ptr = PtrArg->stripPointerCasts();
2629 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
2630 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
2631 if (!Size->isAllOnesValue() &&
2632 Size->getValue().getLimitedValue() >=
2633 DL.getTypeStoreSize(ElemTy)) {
2634 Invariants.insert(GV);
2635 DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
2636 << "\n");
2637 } else {
2638 DEBUG(dbgs() << "Found a global var, but can not treat it as an "
2639 "invariant.\n");
2640 }
2641 }
2642 // Continue even if we do nothing.
2643 ++CurInst;
2644 continue;
2645 } else if (II->getIntrinsicID() == Intrinsic::assume) {
2646 DEBUG(dbgs() << "Skipping assume intrinsic.\n");
2647 ++CurInst;
2648 continue;
2649 }
2651 DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n");
2652 return false;
2653 }
2655 // Resolve function pointers.
2656 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
2657 if (!Callee || Callee->mayBeOverridden()) {
2658 DEBUG(dbgs() << "Can not resolve function pointer.\n");
2659 return false; // Cannot resolve.
2660 }
2662 SmallVector<Constant*, 8> Formals;
2663 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i)
2664 Formals.push_back(getVal(*i));
2666 if (Callee->isDeclaration()) {
2667 // If this is a function we can constant fold, do it.
2668 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
2669 InstResult = C;
2670 DEBUG(dbgs() << "Constant folded function call. Result: " <<
2671 *InstResult << "\n");
2672 } else {
2673 DEBUG(dbgs() << "Can not constant fold function call.\n");
2674 return false;
2675 }
2676 } else {
2677 if (Callee->getFunctionType()->isVarArg()) {
2678 DEBUG(dbgs() << "Can not constant fold vararg function call.\n");
2679 return false;
2680 }
2682 Constant *RetVal = nullptr;
2683 // Execute the call, if successful, use the return value.
2684 ValueStack.emplace_back();
2685 if (!EvaluateFunction(Callee, RetVal, Formals)) {
2686 DEBUG(dbgs() << "Failed to evaluate function.\n");
2687 return false;
2688 }
2689 ValueStack.pop_back();
2690 InstResult = RetVal;
2692 if (InstResult) {
2693 DEBUG(dbgs() << "Successfully evaluated function. Result: " <<
2694 *InstResult << "\n\n");
2695 } else {
2696 DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n");
2697 }
2698 }
2699 } else if (isa<TerminatorInst>(CurInst)) {
2700 DEBUG(dbgs() << "Found a terminator instruction.\n");
2702 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
2703 if (BI->isUnconditional()) {
2704 NextBB = BI->getSuccessor(0);
2705 } else {
2706 ConstantInt *Cond =
2707 dyn_cast<ConstantInt>(getVal(BI->getCondition()));
2708 if (!Cond) return false; // Cannot determine.
2710 NextBB = BI->getSuccessor(!Cond->getZExtValue());
2711 }
2712 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
2713 ConstantInt *Val =
2714 dyn_cast<ConstantInt>(getVal(SI->getCondition()));
2715 if (!Val) return false; // Cannot determine.
2716 NextBB = SI->findCaseValue(Val).getCaseSuccessor();
2717 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
2718 Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
2719 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
2720 NextBB = BA->getBasicBlock();
2722 return false; // Cannot determine.
2723 } else if (isa<ReturnInst>(CurInst)) {
2724 NextBB = nullptr;
2725 } else {
2726 // invoke, unwind, resume, unreachable.
2727 DEBUG(dbgs() << "Can not handle terminator.");
2728 return false; // Cannot handle this terminator.
2729 }
2731 // We succeeded at evaluating this block!
2732 DEBUG(dbgs() << "Successfully evaluated block.\n");
2733 return true;
2734 } else {
2735 // Did not know how to evaluate this!
2736 DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction."
2737 "\n");
2738 return false;
2739 }
2741 if (!CurInst->use_empty()) {
2742 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
2743 InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
2745 setVal(&*CurInst, InstResult);
2746 }
2748 // If we just processed an invoke, we finished evaluating the block.
2749 if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
2750 NextBB = II->getNormalDest();
2751 DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
2752 return true;
2753 }
2755 // Advance program counter.
2756 ++CurInst;
2757 }
2758 }
2760 /// Evaluate a call to function F, returning true if successful, false if we
2761 /// can't evaluate it. ActualArgs contains the formal arguments for the
2762 /// function.
2763 bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
2764 const SmallVectorImpl<Constant*> &ActualArgs) {
2765 // Check to see if this function is already executing (recursion). If so,
2766 // bail out. TODO: we might want to accept limited recursion.
2767 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
2768 return false;
2770 CallStack.push_back(F);
2772 // Initialize arguments to the incoming values specified.
2773 unsigned ArgNo = 0;
2774 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2775 ++AI, ++ArgNo)
2776 setVal(&*AI, ActualArgs[ArgNo]);
2778 // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
2779 // we can only evaluate any one basic block at most once. This set keeps
2780 // track of what we have executed so we can detect recursive cases etc.
2781 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
2783 // CurBB - The current basic block we're evaluating.
2784 BasicBlock *CurBB = &F->front();
2786 BasicBlock::iterator CurInst = CurBB->begin();
2788 while (true) {
2789 BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
2790 DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
2792 if (!EvaluateBlock(CurInst, NextBB))
2793 return false;
2795 if (!NextBB) {
2796 // Successfully running until there's no next block means that we found
2797 // the return. Fill in the return value and pop the call stack.
2798 ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
2799 if (RI->getNumOperands())
2800 RetVal = getVal(RI->getOperand(0));
2801 CallStack.pop_back();
2802 return true;
2803 }
2805 // Okay, we succeeded in evaluating this control flow. See if we have
2806 // executed the new block before. If so, we have a looping function,
2807 // which we cannot evaluate in reasonable time.
2808 if (!ExecutedBlocks.insert(NextBB).second)
2809 return false; // looped!
2811 // Okay, we have never been in this block before. Check to see if there
2812 // are any PHI nodes. If so, evaluate them with information about where
2813 // we came from.
2814 PHINode *PN = nullptr;
2815 for (CurInst = NextBB->begin();
2816 (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
2817 setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
2819 // Advance to the next block.
2820 CurBB = NextBB;
2821 }
2822 }
2824 /// Evaluate static constructors in the function, if we can. Return true if we
2825 /// can, false otherwise.
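// E.g. a constructor consisting only of straight-line stores to globals
// (no loops, no calls into unknown code) is executed here at compile time;
// its stores become initializers and the ctor entry can be dropped from
// llvm.global_ctors.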
2826 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
2827 const TargetLibraryInfo *TLI) {
2828 // Call the function.
2829 Evaluator Eval(DL, TLI);
2830 Constant *RetValDummy;
2831 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2832 SmallVector<Constant*, 0>());
2834 if (EvalSuccess) {
2835 ++NumCtorsEvaluated;
2837 // We succeeded at evaluation: commit the result.
2838 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2839 << F->getName() << "' to " << Eval.getMutatedMemory().size()
2840 << " stores.\n");
2841 for (DenseMap<Constant*, Constant*>::const_iterator I =
2842 Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
2843 I != E; ++I)
2844 CommitValueTo(I->second, I->first);
2845 for (GlobalVariable *GV : Eval.getInvariants())
2846 GV->setConstant(true);
2847 }
2849 return EvalSuccess;
2850 }
2852 static int compareNames(Constant *const *A, Constant *const *B) {
2853 return (*A)->stripPointerCasts()->getName().compare(
2854 (*B)->stripPointerCasts()->getName());
2855 }
2857 static void setUsedInitializer(GlobalVariable &V,
2858 const SmallPtrSet<GlobalValue *, 8> &Init) {
2859 if (Init.empty()) {
2860 V.eraseFromParent();
2861 return;
2862 }
2864 // Type of pointer to the array of pointers.
2865 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
2867 SmallVector<llvm::Constant *, 8> UsedArray;
2868 for (GlobalValue *GV : Init) {
2869 Constant *Cast
2870 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
2871 UsedArray.push_back(Cast);
2872 }
2873 // Sort to get deterministic order.
2874 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2875 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
2877 Module *M = V.getParent();
2878 V.removeFromParent();
2879 GlobalVariable *NV =
2880 new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage,
2881 llvm::ConstantArray::get(ATy, UsedArray), "");
2882 NV->takeName(&V);
2883 NV->setSection("llvm.metadata");
2884 delete &V;
2885 }
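// The variable rebuilt above has the standard llvm.used shape, e.g.:
//   @llvm.used = appending global [2 x i8*]
//       [i8* bitcast (void ()* @f to i8*), i8* @g], section "llvm.metadata"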
2888 /// An easy to access representation of llvm.used and llvm.compiler.used.
2889 class LLVMUsed {
2890 SmallPtrSet<GlobalValue *, 8> Used;
2891 SmallPtrSet<GlobalValue *, 8> CompilerUsed;
2892 GlobalVariable *UsedV;
2893 GlobalVariable *CompilerUsedV;
2895 public:
2896 LLVMUsed(Module &M) {
2897 UsedV = collectUsedGlobalVariables(M, Used, false);
2898 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
2899 }
2900 typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator;
2901 typedef iterator_range<iterator> used_iterator_range;
2902 iterator usedBegin() { return Used.begin(); }
2903 iterator usedEnd() { return Used.end(); }
2904 used_iterator_range used() {
2905 return used_iterator_range(usedBegin(), usedEnd());
2906 }
2907 iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2908 iterator compilerUsedEnd() { return CompilerUsed.end(); }
2909 used_iterator_range compilerUsed() {
2910 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
2911 }
2912 bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2913 bool compilerUsedCount(GlobalValue *GV) const {
2914 return CompilerUsed.count(GV);
2915 }
2916 bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2917 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2918 bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
2919 bool compilerUsedInsert(GlobalValue *GV) {
2920 return CompilerUsed.insert(GV).second;
2921 }
2923 void syncVariablesAndSets() {
2924 if (UsedV)
2925 setUsedInitializer(*UsedV, Used);
2926 if (CompilerUsedV)
2927 setUsedInitializer(*CompilerUsedV, CompilerUsed);
2928 }
2929 };
2932 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2933 if (GA.use_empty()) // No use at all.
2934 return false;
2936 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2937 "We should have removed the duplicated "
2938 "element from llvm.compiler.used");
2939 if (!GA.hasOneUse())
2940 // Strictly more than one use. So at least one is not in llvm.used and
2941 // llvm.compiler.used.
2942 return true;
2944 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2945 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2946 }
2948 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
2949 const LLVMUsed &U) {
2950 unsigned N = 2;
2951 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
2952 "We should have removed the duplicated "
2953 "element from llvm.compiler.used");
2954 if (U.usedCount(&V) || U.compilerUsedCount(&V))
2955 ++N;
2956 return V.hasNUsesOrMore(N);
2957 }
2959 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
2960 if (!GA.hasLocalLinkage())
2961 return true;
2963 return U.usedCount(&GA) || U.compilerUsedCount(&GA);
2964 }
2966 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
2967 bool &RenameTarget) {
2968 RenameTarget = false;
2970 if (hasUseOtherThanLLVMUsed(GA, U))
2971 return true;
2973 // If the alias is externally visible, we may still be able to simplify it.
2974 if (!mayHaveOtherReferences(GA, U))
2975 return false;
2977 // If the aliasee has internal linkage, give it the name and linkage
2978 // of the alias, and delete the alias. This turns:
2979 // define internal ... @f(...)
2980 // @a = alias ... @f
2981 // into
2982 // define ... @a(...)
2983 Constant *Aliasee = GA.getAliasee();
2984 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2985 if (!Target->hasLocalLinkage())
2986 return false;
2988 // Do not perform the transform if multiple aliases potentially target the
2989 // aliasee. This check also ensures that it is safe to replace the section
2990 // and other attributes of the aliasee with those of the alias.
2991 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
2992 return false;
2994 RenameTarget = true;
2995 return true;
2996 }
2998 bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
2999 bool Changed = false;
3001 LLVMUsed Used(M);
3002 for (GlobalValue *GV : Used.used())
3003 Used.compilerUsedErase(GV);
3005 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
3006 I != E;) {
3007 GlobalAlias *J = &*I++;
3009 // Aliases without names cannot be referenced outside this module.
3010 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
3011 J->setLinkage(GlobalValue::InternalLinkage);
3013 if (deleteIfDead(*J)) {
3014 Changed = true;
3015 continue;
3016 }
3018 // If the aliasee may change at link time, nothing can be done - bail out.
3019 if (J->mayBeOverridden())
3020 continue;
3022 Constant *Aliasee = J->getAliasee();
3023 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
3024 // We can't trivially replace the alias with the aliasee if the aliasee is
3025 // non-trivial in some way.
3026 // TODO: Try to handle non-zero GEPs of local aliasees.
3027 if (!Target)
3028 continue;
3029 Target->removeDeadConstantUsers();
3031 // Make all users of the alias use the aliasee instead.
3032 bool RenameTarget;
3033 if (!hasUsesToReplace(*J, Used, RenameTarget))
3034 continue;
3036 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
3037 ++NumAliasesResolved;
3038 Changed = true;
3040 if (RenameTarget) {
3041 // Give the aliasee the name, linkage and other attributes of the alias.
3042 Target->takeName(&*J);
3043 Target->setLinkage(J->getLinkage());
3044 Target->setVisibility(J->getVisibility());
3045 Target->setDLLStorageClass(J->getDLLStorageClass());
3047 if (Used.usedErase(&*J))
3048 Used.usedInsert(Target);
3050 if (Used.compilerUsedErase(&*J))
3051 Used.compilerUsedInsert(Target);
3052 } else if (mayHaveOtherReferences(*J, Used))
3053 continue;
3055 // Delete the alias.
3056 M.getAliasList().erase(J);
3057 ++NumAliasesRemoved;
3058 Changed = true;
3059 }
3061 Used.syncVariablesAndSets();
3063 return Changed;
3064 }
3066 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
3067 if (!TLI->has(LibFunc::cxa_atexit))
3068 return nullptr;
3070 Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
3072 if (!Fn)
3073 return nullptr;
3075 FunctionType *FTy = Fn->getFunctionType();
3077 // Checking that the function has the right return type, the right number of
3078 // parameters and that they all have pointer types should be enough.
3079 if (!FTy->getReturnType()->isIntegerTy() ||
3080 FTy->getNumParams() != 3 ||
3081 !FTy->getParamType(0)->isPointerTy() ||
3082 !FTy->getParamType(1)->isPointerTy() ||
3083 !FTy->getParamType(2)->isPointerTy())
3084 return nullptr;
3086 return Fn;
3087 }
3089 /// Returns whether the given function is an empty C++ destructor and can
3090 /// therefore be eliminated.
3091 /// Note that we assume that other optimization passes have already simplified
3092 /// the code so we only look for a function with a single basic block, where
3093 /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
3094 /// other side-effect free instructions.
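// A destructor this matches looks like (illustrative IR; the mangled name
// is hypothetical):
//   define linkonce_odr void @_ZN3FooD2Ev(%class.Foo* %this) {
//   entry:
//     ret void
//   }
// possibly containing further calls to equally empty destructors.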
3095 static bool cxxDtorIsEmpty(const Function &Fn,
3096 SmallPtrSet<const Function *, 8> &CalledFunctions) {
3097 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
3098 // nounwind, but that doesn't seem worth doing.
3099 if (Fn.isDeclaration())
3100 return false;
3102 if (++Fn.begin() != Fn.end())
3103 return false;
3105 const BasicBlock &EntryBlock = Fn.getEntryBlock();
3106 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
3107 I != E; ++I) {
3108 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
3109 // Ignore debug intrinsics.
3110 if (isa<DbgInfoIntrinsic>(CI))
3111 continue;
3113 const Function *CalledFn = CI->getCalledFunction();
3115 if (!CalledFn)
3116 return false;
3118 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
3120 // Don't treat recursive functions as empty.
3121 if (!NewCalledFunctions.insert(CalledFn).second)
3122 return false;
3124 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
3125 return false;
3126 } else if (isa<ReturnInst>(*I))
3127 return true; // We're done.
3128 else if (I->mayHaveSideEffects())
3129 return false; // Destructor with side effects, bail.
3130 }
3132 return false;
3133 }
3135 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
3136 /// Itanium C++ ABI p3.3.5:
3138 /// After constructing a global (or local static) object, that will require
3139 /// destruction on exit, a termination function is registered as follows:
3141 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
3143 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
3144 /// call f(p) when DSO d is unloaded, before all such termination calls
3145 /// registered before this one. It returns zero if registration is
3146 /// successful, nonzero on failure.
3148 // This pass will look for calls to __cxa_atexit where the function is trivial
3149 // and remove them.
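// A typical candidate call looks like (illustrative IR):
//   call i32 @__cxa_atexit(
//       void (i8*)* bitcast (void (%class.Foo*)* @_ZN3FooD2Ev to void (i8*)*),
//       i8* bitcast (%class.Foo* @obj to i8*), i8* @__dso_handle)
// where @_ZN3FooD2Ev is empty by the criteria above.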
3150 bool Changed = false;
3152 for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
3153 I != E;) {
3154 // We're only interested in calls. Theoretically, we could handle invoke
3155 // instructions as well, but neither llvm-gcc nor clang generate invokes
3156 // to __cxa_atexit.
3157 CallInst *CI = dyn_cast<CallInst>(*I++);
3158 if (!CI)
3159 continue;
3161 Function *DtorFn =
3162 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
3163 if (!DtorFn)
3164 continue;
3166 SmallPtrSet<const Function *, 8> CalledFunctions;
3167 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
3168 continue;
3170 // Just remove the call.
3171 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
3172 CI->eraseFromParent();
3174 ++NumCXXDtorsRemoved;
3176 Changed |= true;
3177 }
3179 return Changed;
3180 }
3182 bool GlobalOpt::runOnModule(Module &M) {
3183 bool Changed = false;
3185 auto &DL = M.getDataLayout();
3186 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
3188 bool LocalChange = true;
3189 while (LocalChange) {
3190 LocalChange = false;
3192 NotDiscardableComdats.clear();
3193 for (const GlobalVariable &GV : M.globals())
3194 if (const Comdat *C = GV.getComdat())
3195 if (!GV.isDiscardableIfUnused() || !GV.use_empty())
3196 NotDiscardableComdats.insert(C);
3197 for (Function &F : M)
3198 if (const Comdat *C = F.getComdat())
3199 if (!F.isDefTriviallyDead())
3200 NotDiscardableComdats.insert(C);
3201 for (GlobalAlias &GA : M.aliases())
3202 if (const Comdat *C = GA.getComdat())
3203 if (!GA.isDiscardableIfUnused() || !GA.use_empty())
3204 NotDiscardableComdats.insert(C);
3206 // Delete functions that are trivially dead, ccc -> fastcc
3207 LocalChange |= OptimizeFunctions(M);
3209 // Optimize global_ctors list.
3210 LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
3211 return EvaluateStaticConstructor(F, DL, TLI);
3214 // Optimize non-address-taken globals.
3215 LocalChange |= OptimizeGlobalVars(M);
3217 // Resolve aliases, when possible.
3218 LocalChange |= OptimizeGlobalAliases(M);
3220 // Try to remove trivial global destructors if they are not removed
3221 // already.
3222 Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
3223 if (CXAAtExitFn)
3224 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
3226 Changed |= LocalChange;
3227 }
3229 // TODO: Move all global ctors functions to the end of the module for code
3230 // size.