//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumFnDeleted , "Number of functions deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
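
// These counters show up in "opt -stats" output and give a quick picture of
// what the pass actually did on a given module.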

namespace {
  struct GlobalOpt : public ModulePass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<TargetLibraryInfo>();
    }
    static char ID; // Pass identification, replacement for typeid
    GlobalOpt() : ModulePass(ID) {
      initializeGlobalOptPass(*PassRegistry::getPassRegistry());
    }

    bool runOnModule(Module &M);

  private:
    GlobalVariable *FindGlobalCtors(Module &M);
    bool OptimizeFunctions(Module &M);
    bool OptimizeGlobalVars(Module &M);
    bool OptimizeGlobalAliases(Module &M);
    bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
    bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
    bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                               const SmallPtrSet<const PHINode*, 16> &PHIUsers,
                               const GlobalStatus &GS);
    bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);

    TargetLibraryInfo *TLI;
  };
}

char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
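
// Clients normally obtain this pass through createGlobalOptimizerPass(); it is
// typically scheduled early in the module-level (interprocedural) portion of
// the standard optimization pipelines.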

namespace {

/// GlobalStatus - As we analyze each global, keep track of some information
/// about it.  If we find out that the address of the global is taken, none of
/// this info will be accurate.
struct GlobalStatus {
  /// isCompared - True if the global's address is used in a comparison.
  bool isCompared;

  /// isLoaded - True if the global is ever loaded.  If the global isn't ever
  /// loaded it can be deleted.
  bool isLoaded;

  /// StoredType - Keep track of what stores to the global look like.
  enum StoredType {
    /// NotStored - There is no store to this global.  It can thus be marked
    /// constant.
    NotStored,

    /// isInitializerStored - This global is stored to, but the only thing
    /// stored is the constant it was initialized with.  This is only tracked
    /// for scalar globals.
    isInitializerStored,

    /// isStoredOnce - This global is stored to, but only its initializer and
    /// one other value is ever stored to it.  If this global isStoredOnce, we
    /// track the value stored to it in StoredOnceValue below.  This is only
    /// tracked for scalar globals.
    isStoredOnce,

    /// isStored - This global is stored to by multiple values or something else
    /// that we cannot track.
    isStored
  } StoredType;

  /// StoredOnceValue - If only one value (besides the initializer constant) is
  /// ever stored to this global, keep track of what value it is.
  Value *StoredOnceValue;

  /// AccessingFunction/HasMultipleAccessingFunctions - These start out
  /// null/false.  When the first accessing function is noticed, it is recorded.
  /// When a second different accessing function is noticed,
  /// HasMultipleAccessingFunctions is set to true.
  const Function *AccessingFunction;
  bool HasMultipleAccessingFunctions;

  /// HasNonInstructionUser - Set to true if this global has a user that is not
  /// an instruction (e.g. a constant expr or GV initializer).
  bool HasNonInstructionUser;

  /// HasPHIUser - Set to true if this global has a user that is a PHI node.
  bool HasPHIUser;

  /// AtomicOrdering - Set to the strongest atomic ordering requirement.
  AtomicOrdering Ordering;

  GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
                   StoredOnceValue(0), AccessingFunction(0),
                   HasMultipleAccessingFunctions(false),
                   HasNonInstructionUser(false), HasPHIUser(false),
                   Ordering(NotAtomic) {}
};

}

/// StrongerOrdering - Return the stronger of the two orderings.  If the two
/// orderings are acquire and release, then return AcquireRelease.
///
static AtomicOrdering StrongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
  if (X == Acquire && Y == Release) return AcquireRelease;
  if (Y == Acquire && X == Release) return AcquireRelease;
  return (AtomicOrdering)std::max(X, Y);
}

/// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
/// by constants itself.  Note that constants cannot be cyclic, so this test is
/// pretty easy to implement recursively.
///
static bool SafeToDestroyConstant(const Constant *C) {
  if (isa<GlobalValue>(C)) return false;

  for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
       ++UI)
    if (const Constant *CU = dyn_cast<Constant>(*UI)) {
      if (!SafeToDestroyConstant(CU)) return false;
    } else
      return false;
  return true;
}
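
// For example, an unused ConstantExpr bitcast of a global can be destroyed,
// while a GlobalValue itself, or any constant still referenced from an
// instruction or another global's initializer, cannot.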

/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus
/// structure.  If the global has its address taken, return true to indicate we
/// can't do anything with it.
///
static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
                          SmallPtrSet<const PHINode*, 16> &PHIUsers) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;
    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      GS.HasNonInstructionUser = true;

      // If the result of the constantexpr isn't pointer type, then we won't
      // know to expect it in various places.  Just reject early.
      if (!isa<PointerType>(CE->getType())) return true;

      if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
      if (!GS.HasMultipleAccessingFunctions) {
        const Function *F = I->getParent()->getParent();
        if (GS.AccessingFunction == 0)
          GS.AccessingFunction = F;
        else if (GS.AccessingFunction != F)
          GS.HasMultipleAccessingFunctions = true;
      }
      if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
        GS.isLoaded = true;
        // Don't hack on volatile loads.
        if (LI->isVolatile()) return true;
        GS.Ordering = StrongerOrdering(GS.Ordering, LI->getOrdering());
      } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
        // Don't allow a store OF the address, only stores TO the address.
        if (SI->getOperand(0) == V) return true;

        // Don't hack on volatile stores.
        if (SI->isVolatile()) return true;

        GS.Ordering = StrongerOrdering(GS.Ordering, SI->getOrdering());

        // If this is a direct store to the global (i.e., the global is a scalar
        // value, not an aggregate), keep more specific information about
        // stores.
        if (GS.StoredType != GlobalStatus::isStored) {
          if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(
                                                           SI->getOperand(1))) {
            Value *StoredVal = SI->getOperand(0);

            if (Constant *C = dyn_cast<Constant>(StoredVal)) {
              if (C->isThreadDependent()) {
                // The stored value changes between threads; don't track it.
                return true;
              }
            }

            if (StoredVal == GV->getInitializer()) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (isa<LoadInst>(StoredVal) &&
                       cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (GS.StoredType < GlobalStatus::isStoredOnce) {
              GS.StoredType = GlobalStatus::isStoredOnce;
              GS.StoredOnceValue = StoredVal;
            } else if (GS.StoredType == GlobalStatus::isStoredOnce &&
                       GS.StoredOnceValue == StoredVal) {
              // Noop: the same single value is stored again.
            } else {
              GS.StoredType = GlobalStatus::isStored;
            }
          } else {
            GS.StoredType = GlobalStatus::isStored;
          }
        }
      } else if (isa<BitCastInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<GetElementPtrInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<SelectInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
        // PHI nodes we can check just like select or GEP instructions, but we
        // have to be careful about infinite recursion.
        if (PHIUsers.insert(PN))  // Not already visited.
          if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
        GS.HasPHIUser = true;
      } else if (isa<CmpInst>(I)) {
        GS.isCompared = true;
      } else if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
        if (MTI->isVolatile()) return true;
        if (MTI->getArgOperand(0) == V)
          GS.StoredType = GlobalStatus::isStored;
        if (MTI->getArgOperand(1) == V)
          GS.isLoaded = true;
      } else if (const MemSetInst *MSI = dyn_cast<MemSetInst>(I)) {
        assert(MSI->getArgOperand(0) == V && "Memset only takes one pointer!");
        if (MSI->isVolatile()) return true;
        GS.StoredType = GlobalStatus::isStored;
      } else {
        return true;  // Any other non-load instruction might take address!
      }
    } else if (const Constant *C = dyn_cast<Constant>(U)) {
      GS.HasNonInstructionUser = true;
      // We might have a dead and dangling constant hanging off of here.
      if (!SafeToDestroyConstant(C))
        return true;
    } else {
      GS.HasNonInstructionUser = true;
      // Otherwise must be some other user.
      return true;
    }
  }

  return false;
}
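
// For example, an internal scalar global that is only ever re-stored with its
// own initializer ends up with StoredType == isInitializerStored, which lets
// the caller treat it as effectively constant.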

/// isLeakCheckerRoot - Is this global variable possibly used by a leak checker
/// as a root?  If so, we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(cast<PointerType>(GV->getType())->getElementType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
      default: break;
      case Type::PointerTyID: return true;
      case Type::ArrayTyID:
      case Type::VectorTyID: {
        SequentialType *STy = cast<SequentialType>(Ty);
        Types.push_back(STy->getElementType());
        break;
      }
      case Type::StructTyID: {
        StructType *STy = cast<StructType>(Ty);
        if (STy->isOpaque()) return true;
        for (StructType::element_iterator I = STy->element_begin(),
                 E = STy->element_end(); I != E; ++I) {
          Type *InnerTy = *I;
          if (isa<PointerType>(InnerTy)) return true;
          if (isa<CompositeType>(InnerTy))
            Types.push_back(InnerTy);
        }
        break;
      }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}
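
// For example, an internal global of type [4 x i8*], or a struct containing an
// i8* field, is treated as a leak-checker root; a plain i32 global is not.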

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (1);
}

/// CleanupPointerRootUsers - This GV is a pointer root.  Loop over all users
/// of the global and clean up any that obviously don't assign the global a
/// value that isn't dynamically allocated.
///
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time. The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit. This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals. To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
      Changed = true;
    }
  }

  return Changed;
}
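
// In effect, stores of constant data into the root are deleted immediately,
// while a store of a single-use computation chain (e.g. a malloc result) is
// queued in Dead and removed once the whole chain is known to be side-effect
// free.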

/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       DataLayout *TD, TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
    User *U = *UI++;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
      } else if (CE->getOpcode() == Instruction::BitCast &&
                 CE->getType()->isPointerTy()) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getType()->getElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        CleanupConstantGlobalUsers(V, Init, TD, TLI);
        return true;
      }
    }
  }
  return Changed;
}
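
// Unlike CleanupPointerRootUsers above, this runs only once the global is
// known to be constant, so loads can be folded to the corresponding piece of
// the initializer.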

/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return SafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}

/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // constant C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}

/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;
  }
  return true;
}
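
// In practice this means every use of the global is a GEP (instruction or
// constant expression) of the form 'gep GV, 0, <constant-int>, ...' whose own
// uses are loads, stores *to* the derived pointer, or dead constants.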
663 /// SRAGlobal - Perform scalar replacement of aggregates on the specified global
664 /// variable. This opens the door for other optimizations by exposing the
665 /// behavior of the program in a more fine-grained way. We have determined that
666 /// this transformation is safe already. We return the first global variable we
667 /// insert so that the caller can reprocess it.
668 static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
669 // Make sure this global only has simple uses that we can SRA.
670 if (!GlobalUsersSafeToSRA(GV))
673 assert(GV->hasLocalLinkage() && !GV->isConstant());
674 Constant *Init = GV->getInitializer();
675 Type *Ty = Init->getType();
677 std::vector<GlobalVariable*> NewGlobals;
678 Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
680 // Get the alignment of the global, either explicit or target-specific.
681 unsigned StartAlignment = GV->getAlignment();
682 if (StartAlignment == 0)
683 StartAlignment = TD.getABITypeAlignment(GV->getType());
685 if (StructType *STy = dyn_cast<StructType>(Ty)) {
686 NewGlobals.reserve(STy->getNumElements());
687 const StructLayout &Layout = *TD.getStructLayout(STy);
688 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
689 Constant *In = Init->getAggregateElement(i);
690 assert(In && "Couldn't get element of initializer?");
691 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
692 GlobalVariable::InternalLinkage,
693 In, GV->getName()+"."+Twine(i),
694 GV->getThreadLocalMode(),
695 GV->getType()->getAddressSpace());
696 Globals.insert(GV, NGV);
697 NewGlobals.push_back(NGV);
699 // Calculate the known alignment of the field. If the original aggregate
700 // had 256 byte alignment for example, something might depend on that:
701 // propagate info to each field.
702 uint64_t FieldOffset = Layout.getElementOffset(i);
703 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
704 if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
705 NGV->setAlignment(NewAlign);
707 } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
708 unsigned NumElements = 0;
709 if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
710 NumElements = ATy->getNumElements();
712 NumElements = cast<VectorType>(STy)->getNumElements();
714 if (NumElements > 16 && GV->hasNUsesOrMore(16))
715 return 0; // It's not worth it.
716 NewGlobals.reserve(NumElements);
718 uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
719 unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
720 for (unsigned i = 0, e = NumElements; i != e; ++i) {
721 Constant *In = Init->getAggregateElement(i);
722 assert(In && "Couldn't get element of initializer?");
724 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
725 GlobalVariable::InternalLinkage,
726 In, GV->getName()+"."+Twine(i),
727 GV->getThreadLocalMode(),
728 GV->getType()->getAddressSpace());
729 Globals.insert(GV, NGV);
730 NewGlobals.push_back(NGV);
732 // Calculate the known alignment of the field. If the original aggregate
733 // had 256 byte alignment for example, something might depend on that:
734 // propagate info to each field.
735 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
736 if (NewAlign > EltAlign)
737 NGV->setAlignment(NewAlign);
741 if (NewGlobals.empty())
744 DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);
746 Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));
748 // Loop over all of the uses of the global, replacing the constantexpr geps,
749 // with smaller constantexpr geps or direct references.
750 while (!GV->use_empty()) {
751 User *GEP = GV->use_back();
752 assert(((isa<ConstantExpr>(GEP) &&
753 cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
754 isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
    // Ignore operand 1, which has to be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
759 unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
760 if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.
762 Value *NewPtr = NewGlobals[Val];
764 // Form a shorter GEP if needed.
765 if (GEP->getNumOperands() > 3) {
766 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
767 SmallVector<Constant*, 8> Idxs;
768 Idxs.push_back(NullInt);
769 for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
770 Idxs.push_back(CE->getOperand(i));
771 NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs);
773 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
774 SmallVector<Value*, 8> Idxs;
775 Idxs.push_back(NullInt);
776 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
777 Idxs.push_back(GEPI->getOperand(i));
778 NewPtr = GetElementPtrInst::Create(NewPtr, Idxs,
779 GEPI->getName()+"."+Twine(Val),GEPI);
782 GEP->replaceAllUsesWith(NewPtr);
784 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
785 GEPI->eraseFromParent();
787 cast<ConstantExpr>(GEP)->destroyConstant();
790 // Delete the old global, now that it is dead.
794 // Loop over the new globals array deleting any globals that are obviously
795 // dead. This can arise due to scalarization of a structure or an array that
796 // has elements that are dead.
797 unsigned FirstGlobal = 0;
798 for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
799 if (NewGlobals[i]->use_empty()) {
800 Globals.erase(NewGlobals[i]);
801 if (FirstGlobal == i) ++FirstGlobal;
804 return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
807 /// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
808 /// value will trap if the value is dynamically null. PHIs keeps track of any
809 /// phi nodes we've seen to avoid reprocessing them.
810 static bool AllUsesOfValueWillTrapIfNull(const Value *V,
811 SmallPtrSet<const PHINode*, 8> &PHIs) {
812 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
816 if (isa<LoadInst>(U)) {
818 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
819 if (SI->getOperand(0) == V) {
820 //cerr << "NONTRAPPING USE: " << *U;
821 return false; // Storing the value.
823 } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
824 if (CI->getCalledValue() != V) {
825 //cerr << "NONTRAPPING USE: " << *U;
826 return false; // Not calling the ptr
828 } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
829 if (II->getCalledValue() != V) {
830 //cerr << "NONTRAPPING USE: " << *U;
831 return false; // Not calling the ptr
833 } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
834 if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
835 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
836 if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
837 } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
838 // If we've already seen this phi node, ignore it, it has already been
840 if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
842 } else if (isa<ICmpInst>(U) &&
843 isa<ConstantPointerNull>(UI->getOperand(1))) {
844 // Ignore icmp X, null
846 //cerr << "NONTRAPPING USE: " << *U;
853 /// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
854 /// from GV will trap if the loaded value is null. Note that this also permits
855 /// comparisons of the loaded value against null, as a special case.
856 static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
857 for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
861 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
862 SmallPtrSet<const PHINode*, 8> PHIs;
863 if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
865 } else if (isa<StoreInst>(U)) {
866 // Ignore stores to the global.
868 // We don't know or understand this user, bail out.
869 //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
876 static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
877 bool Changed = false;
878 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
879 Instruction *I = cast<Instruction>(*UI++);
880 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
881 LI->setOperand(0, NewV);
883 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
884 if (SI->getOperand(1) == V) {
885 SI->setOperand(1, NewV);
888 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
890 if (CS.getCalledValue() == V) {
891 // Calling through the pointer! Turn into a direct call, but be careful
892 // that the pointer is not also being passed as an argument.
893 CS.setCalledFunction(NewV);
895 bool PassedAsArg = false;
896 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
897 if (CS.getArgument(i) == V) {
899 CS.setArgument(i, NewV);
903 // Being passed as an argument also. Be careful to not invalidate UI!
907 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
908 Changed |= OptimizeAwayTrappingUsesOfValue(CI,
909 ConstantExpr::getCast(CI->getOpcode(),
910 NewV, CI->getType()));
911 if (CI->use_empty()) {
913 CI->eraseFromParent();
915 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
916 // Should handle GEP here.
917 SmallVector<Constant*, 8> Idxs;
918 Idxs.reserve(GEPI->getNumOperands()-1);
919 for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
921 if (Constant *C = dyn_cast<Constant>(*i))
925 if (Idxs.size() == GEPI->getNumOperands()-1)
926 Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
927 ConstantExpr::getGetElementPtr(NewV, Idxs));
928 if (GEPI->use_empty()) {
930 GEPI->eraseFromParent();
/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it.  If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable when the value is null, so we can optimize away the load.
943 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
945 TargetLibraryInfo *TLI) {
946 bool Changed = false;
948 // Keep track of whether we are able to remove all the uses of the global
949 // other than the store that defines it.
950 bool AllNonStoreUsesGone = true;
952 // Replace all uses of loads with uses of uses of the stored value.
953 for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
954 User *GlobalUser = *GUI++;
955 if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
956 Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
957 // If we were able to delete all uses of the loads
958 if (LI->use_empty()) {
959 LI->eraseFromParent();
962 AllNonStoreUsesGone = false;
964 } else if (isa<StoreInst>(GlobalUser)) {
965 // Ignore the store that stores "LV" to the global.
966 assert(GlobalUser->getOperand(1) == GV &&
967 "Must be storing *to* the global");
969 AllNonStoreUsesGone = false;
971 // If we get here we could have other crazy uses that are transitively
973 assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
974 isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
975 isa<BitCastInst>(GlobalUser) ||
976 isa<GetElementPtrInst>(GlobalUser)) &&
977 "Only expect load and stores!");
982 DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
986 // If we nuked all of the loads, then none of the stores are needed either,
987 // nor is the global.
988 if (AllNonStoreUsesGone) {
989 if (isLeakCheckerRoot(GV)) {
990 Changed |= CleanupPointerRootUsers(GV, TLI);
993 CleanupConstantGlobalUsers(GV, 0, TD, TLI);
995 if (GV->use_empty()) {
996 DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
998 GV->eraseFromParent();
1005 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
1006 /// instructions that are foldable.
1007 static void ConstantPropUsersOf(Value *V,
1008 DataLayout *TD, TargetLibraryInfo *TLI) {
1009 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
1010 if (Instruction *I = dyn_cast<Instruction>(*UI++))
1011 if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
1012 I->replaceAllUsesWith(NewC);
1014 // Advance UI to the next non-I use to avoid invalidating it!
1015 // Instructions could multiply use V.
1016 while (UI != E && *UI == I)
1018 I->eraseFromParent();
1022 /// OptimizeGlobalAddressOfMalloc - This function takes the specified global
1023 /// variable, and transforms the program as if it always contained the result of
1024 /// the specified malloc. Because it is always the result of the specified
1025 /// malloc, there is no reason to actually DO the malloc. Instead, turn the
1026 /// malloc into a global, and any loads of GV as uses of the new global.
1027 static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
1030 ConstantInt *NElements,
1032 TargetLibraryInfo *TLI) {
1033 DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
1036 if (NElements->getZExtValue() == 1)
1037 GlobalType = AllocTy;
1039 // If we have an array allocation, the global variable is of an array.
1040 GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());
1042 // Create the new global variable. The contents of the malloc'd memory is
1043 // undefined, so initialize with an undef value.
1044 GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
1046 GlobalValue::InternalLinkage,
1047 UndefValue::get(GlobalType),
1048 GV->getName()+".body",
1050 GV->getThreadLocalMode());
1052 // If there are bitcast users of the malloc (which is typical, usually we have
1053 // a malloc + bitcast) then replace them with uses of the new global. Update
1054 // other users to use the global as well.
1055 BitCastInst *TheBC = 0;
1056 while (!CI->use_empty()) {
1057 Instruction *User = cast<Instruction>(CI->use_back());
1058 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
1059 if (BCI->getType() == NewGV->getType()) {
1060 BCI->replaceAllUsesWith(NewGV);
1061 BCI->eraseFromParent();
1063 BCI->setOperand(0, NewGV);
1067 TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
1068 User->replaceUsesOfWith(CI, TheBC);
1072 Constant *RepValue = NewGV;
1073 if (NewGV->getType() != GV->getType()->getElementType())
1074 RepValue = ConstantExpr::getBitCast(RepValue,
1075 GV->getType()->getElementType());
1077 // If there is a comparison against null, we will insert a global bool to
1078 // keep track of whether the global was initialized yet or not.
1079 GlobalVariable *InitBool =
1080 new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
1081 GlobalValue::InternalLinkage,
1082 ConstantInt::getFalse(GV->getContext()),
1083 GV->getName()+".init", GV->getThreadLocalMode());
1084 bool InitBoolUsed = false;
1086 // Loop over all uses of GV, processing them in turn.
1087 while (!GV->use_empty()) {
1088 if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
1089 // The global is initialized when the store to it occurs.
1090 new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
1091 SI->getOrdering(), SI->getSynchScope(), SI);
1092 SI->eraseFromParent();
1096 LoadInst *LI = cast<LoadInst>(GV->use_back());
1097 while (!LI->use_empty()) {
1098 Use &LoadUse = LI->use_begin().getUse();
1099 if (!isa<ICmpInst>(LoadUse.getUser())) {
1104 ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
1105 // Replace the cmp X, 0 with a use of the bool value.
1106 // Sink the load to where the compare was, if atomic rules allow us to.
1107 Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
1108 LI->getOrdering(), LI->getSynchScope(),
1109 LI->isUnordered() ? (Instruction*)ICI : LI);
1110 InitBoolUsed = true;
1111 switch (ICI->getPredicate()) {
1112 default: llvm_unreachable("Unknown ICmp Predicate!");
1113 case ICmpInst::ICMP_ULT:
1114 case ICmpInst::ICMP_SLT: // X < null -> always false
1115 LV = ConstantInt::getFalse(GV->getContext());
1117 case ICmpInst::ICMP_ULE:
1118 case ICmpInst::ICMP_SLE:
1119 case ICmpInst::ICMP_EQ:
1120 LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
1122 case ICmpInst::ICMP_NE:
1123 case ICmpInst::ICMP_UGE:
1124 case ICmpInst::ICMP_SGE:
1125 case ICmpInst::ICMP_UGT:
1126 case ICmpInst::ICMP_SGT:
1127 break; // no change.
1129 ICI->replaceAllUsesWith(LV);
1130 ICI->eraseFromParent();
1132 LI->eraseFromParent();
1135 // If the initialization boolean was used, insert it, otherwise delete it.
1136 if (!InitBoolUsed) {
1137 while (!InitBool->use_empty()) // Delete initializations
1138 cast<StoreInst>(InitBool->use_back())->eraseFromParent();
1141 GV->getParent()->getGlobalList().insert(GV, InitBool);
1143 // Now the GV is dead, nuke it and the malloc..
1144 GV->eraseFromParent();
1145 CI->eraseFromParent();
1147 // To further other optimizations, loop over all users of NewGV and try to
1148 // constant prop them. This will promote GEP instructions with constant
1149 // indices into GEP constant-exprs, which will allow global-opt to hack on it.
1150 ConstantPropUsersOf(NewGV, TD, TLI);
1151 if (RepValue != NewGV)
1152 ConstantPropUsersOf(RepValue, TD, TLI);
1157 /// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
1158 /// to make sure that there are no complex uses of V. We permit simple things
1159 /// like dereferencing the pointer, but not storing through the address, unless
1160 /// it is to the specified global.
1161 static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
1162 const GlobalVariable *GV,
1163 SmallPtrSet<const PHINode*, 8> &PHIs) {
1164 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
1166 const Instruction *Inst = cast<Instruction>(*UI);
1168 if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
1169 continue; // Fine, ignore.
1172 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1173 if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
1174 return false; // Storing the pointer itself... bad.
1175 continue; // Otherwise, storing through it, or storing into GV... fine.
1178 // Must index into the array and into the struct.
1179 if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
1180 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
1185 if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
1186 // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
1188 if (PHIs.insert(PN))
1189 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
1194 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
1195 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
1205 /// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
1206 /// somewhere. Transform all uses of the allocation into loads from the
1207 /// global and uses of the resultant pointer. Further, delete the store into
/// GV.  This assumes that these values pass the
1209 /// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
1210 static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
1211 GlobalVariable *GV) {
1212 while (!Alloc->use_empty()) {
1213 Instruction *U = cast<Instruction>(*Alloc->use_begin());
1214 Instruction *InsertPt = U;
1215 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1216 // If this is the store of the allocation into the global, remove it.
1217 if (SI->getOperand(1) == GV) {
1218 SI->eraseFromParent();
1221 } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
1222 // Insert the load in the corresponding predecessor, not right before the
1224 InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
1225 } else if (isa<BitCastInst>(U)) {
1226 // Must be bitcast between the malloc and store to initialize the global.
1227 ReplaceUsesOfMallocWithGlobal(U, GV);
1228 U->eraseFromParent();
1230 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
1231 // If this is a "GEP bitcast" and the user is a store to the global, then
1232 // just process it as a bitcast.
1233 if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
1234 if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
1235 if (SI->getOperand(1) == GV) {
1236 // Must be bitcast GEP between the malloc and store to initialize
1238 ReplaceUsesOfMallocWithGlobal(GEPI, GV);
1239 GEPI->eraseFromParent();
1244 // Insert a load from the global, and use it instead of the malloc.
1245 Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
1246 U->replaceUsesOfWith(Alloc, NL);
1250 /// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
1251 /// of a load) are simple enough to perform heap SRA on. This permits GEP's
1252 /// that index through the array and struct field, icmps of null, and PHIs.
1253 static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
1254 SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
1255 SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
1256 // We permit two users of the load: setcc comparing against the null
1257 // pointer, and a getelementptr of a specific form.
1258 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
1260 const Instruction *User = cast<Instruction>(*UI);
1262 // Comparison against null is ok.
1263 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
1264 if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
1269 // getelementptr is also ok, but only a simple form.
1270 if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
1271 // Must index into the array and into the struct.
1272 if (GEPI->getNumOperands() < 3)
1275 // Otherwise the GEP is ok.
1279 if (const PHINode *PN = dyn_cast<PHINode>(User)) {
1280 if (!LoadUsingPHIsPerLoad.insert(PN))
1281 // This means some phi nodes are dependent on each other.
1282 // Avoid infinite looping!
1284 if (!LoadUsingPHIs.insert(PN))
1285 // If we have already analyzed this PHI, then it is safe.
1288 // Make sure all uses of the PHI are simple enough to transform.
1289 if (!LoadUsesSimpleEnoughForHeapSRA(PN,
1290 LoadUsingPHIs, LoadUsingPHIsPerLoad))
1296 // Otherwise we don't know what this is, not ok.
1304 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
1305 /// GV are simple enough to perform HeapSRA, return true.
1306 static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
1307 Instruction *StoredVal) {
1308 SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
1309 SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
1310 for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
1312 if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
1313 if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
1314 LoadUsingPHIsPerLoad))
1316 LoadUsingPHIsPerLoad.clear();
1319 // If we reach here, we know that all uses of the loads and transitive uses
1320 // (through PHI nodes) are simple enough to transform. However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
1322 // Check to verify that all operands of the PHIs are either PHIS that can be
1323 // transformed, loads from GV, or MI itself.
1324 for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
1325 , E = LoadUsingPHIs.end(); I != E; ++I) {
1326 const PHINode *PN = *I;
1327 for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
1328 Value *InVal = PN->getIncomingValue(op);
1330 // PHI of the stored value itself is ok.
1331 if (InVal == StoredVal) continue;
1333 if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
1334 // One of the PHIs in our set is (optimistically) ok.
1335 if (LoadUsingPHIs.count(InPN))
1340 // Load from GV is ok.
1341 if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
1342 if (LI->getOperand(0) == GV)
1347 // Anything else is rejected.
1355 static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
1356 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
1357 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
1358 std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];
1360 if (FieldNo >= FieldVals.size())
1361 FieldVals.resize(FieldNo+1);
1363 // If we already have this value, just reuse the previously scalarized
1365 if (Value *FieldVal = FieldVals[FieldNo])
1368 // Depending on what instruction this is, we have several cases.
1370 if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
1371 // This is a scalarized version of the load from the global. Just create
1372 // a new Load of the scalarized global.
1373 Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
1374 InsertedScalarizedValues,
1376 LI->getName()+".f"+Twine(FieldNo), LI);
1377 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
1378 // PN's type is pointer to struct. Make a new PHI of pointer to struct
1381 cast<StructType>(cast<PointerType>(PN->getType())->getElementType());
1384 PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
1385 PN->getNumIncomingValues(),
1386 PN->getName()+".f"+Twine(FieldNo), PN);
1388 PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
1390 llvm_unreachable("Unknown usable value");
1393 return FieldVals[FieldNo] = Result;
1396 /// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
1397 /// the load, rewrite the derived value to use the HeapSRoA'd load.
1398 static void RewriteHeapSROALoadUser(Instruction *LoadUser,
1399 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
1400 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
1401 // If this is a comparison against null, handle it.
1402 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
1403 assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
1404 // If we have a setcc of the loaded pointer, we can use a setcc of any
1406 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
1407 InsertedScalarizedValues, PHIsToRewrite);
1409 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
1410 Constant::getNullValue(NPtr->getType()),
1412 SCI->replaceAllUsesWith(New);
1413 SCI->eraseFromParent();
1417 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
1418 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
1419 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
1420 && "Unexpected GEPI!");
1422 // Load the pointer for this field.
1423 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
1424 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
1425 InsertedScalarizedValues, PHIsToRewrite);
1427 // Create the new GEP idx vector.
1428 SmallVector<Value*, 8> GEPIdx;
1429 GEPIdx.push_back(GEPI->getOperand(1));
1430 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());
1432 Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx,
1433 GEPI->getName(), GEPI);
1434 GEPI->replaceAllUsesWith(NGEPI);
1435 GEPI->eraseFromParent();
1439 // Recursively transform the users of PHI nodes. This will lazily create the
1440 // PHIs that are needed for individual elements. Keep track of what PHIs we
1441 // see in InsertedScalarizedValues so that we don't get infinite loops (very
1442 // antisocial). If the PHI is already in InsertedScalarizedValues, it has
1443 // already been seen first by another load, so its uses have already been
1445 PHINode *PN = cast<PHINode>(LoadUser);
1446 if (!InsertedScalarizedValues.insert(std::make_pair(PN,
1447 std::vector<Value*>())).second)
1450 // If this is the first time we've seen this PHI, recursively process all
1452 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
1453 Instruction *User = cast<Instruction>(*UI++);
1454 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
1458 /// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr
1459 /// is a value loaded from the global. Eliminate all uses of Ptr, making them
1460 /// use FieldGlobals instead. All uses of loaded values satisfy
1461 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
1462 static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
1463 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
1464 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
1465 for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
1467 Instruction *User = cast<Instruction>(*UI++);
1468 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
1471 if (Load->use_empty()) {
1472 Load->eraseFromParent();
1473 InsertedScalarizedValues.erase(Load);
1477 /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
1478 /// it up into multiple allocations of arrays of the fields.
1479 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
1480 Value *NElems, DataLayout *TD,
1481 const TargetLibraryInfo *TLI) {
1482 DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
1483 Type *MAT = getMallocAllocatedType(CI, TLI);
1484 StructType *STy = cast<StructType>(MAT);
1486 // There is guaranteed to be at least one use of the malloc (storing
1487 // it into GV). If there are other uses, change them to be uses of
1488 // the global to simplify later code. This also deletes the store
1490 ReplaceUsesOfMallocWithGlobal(CI, GV);
1492 // Okay, at this point, there are no users of the malloc. Insert N
1493 // new mallocs at the same place as CI, and N globals.
1494 std::vector<Value*> FieldGlobals;
1495 std::vector<Value*> FieldMallocs;
1497 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
1498 Type *FieldTy = STy->getElementType(FieldNo);
1499 PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
1501 GlobalVariable *NGV =
1502 new GlobalVariable(*GV->getParent(),
1503 PFieldTy, false, GlobalValue::InternalLinkage,
1504 Constant::getNullValue(PFieldTy),
1505 GV->getName() + ".f" + Twine(FieldNo), GV,
1506 GV->getThreadLocalMode());
1507 FieldGlobals.push_back(NGV);
1509 unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
1510 if (StructType *ST = dyn_cast<StructType>(FieldTy))
1511 TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
1512 Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
1513 Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
1514 ConstantInt::get(IntPtrTy, TypeSize),
1516 CI->getName() + ".f" + Twine(FieldNo));
1517 FieldMallocs.push_back(NMI);
1518 new StoreInst(NMI, NGV, CI);
1521 // The tricky aspect of this transformation is handling the case when malloc
1522 // fails. In the original code, malloc failing would set the result pointer
1523 // of malloc to null. In this case, some mallocs could succeed and others
1524 // could fail. As such, we emit code that looks like this:
1525 // F0 = malloc(field0)
1526 // F1 = malloc(field1)
1527 // F2 = malloc(field2)
1528 // if (F0 == 0 || F1 == 0 || F2 == 0) {
1529 // if (F0) { free(F0); F0 = 0; }
1530 // if (F1) { free(F1); F1 = 0; }
1531 // if (F2) { free(F2); F2 = 0; }
1533 // The malloc can also fail if its argument is too large.
1534 Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
1535 Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
1536 ConstantZero, "isneg");
1537 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
1538 Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
1539 Constant::getNullValue(FieldMallocs[i]->getType()),
1541 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
1544 // Split the basic block at the old malloc.
1545 BasicBlock *OrigBB = CI->getParent();
1546 BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");
1548 // Create the block to check the first condition. Put all these blocks at the
1549 // end of the function as they are unlikely to be executed.
1550 BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
1552 OrigBB->getParent());
1554 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
1555 // branch on RunningOr.
1556 OrigBB->getTerminator()->eraseFromParent();
1557 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);
1559 // Within the NullPtrBlock, we need to emit a comparison and branch for each
1560 // pointer, because some may be null while others are not.
1561 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1562 Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
1563 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
1564 Constant::getNullValue(GVVal->getType()));
1565 BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
1566 OrigBB->getParent());
1567 BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
1568 OrigBB->getParent());
1569 Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
1572 // Fill in FreeBlock.
1573 CallInst::CreateFree(GVVal, BI);
1574 new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
1576 BranchInst::Create(NextBlock, FreeBlock);
1578 NullPtrBlock = NextBlock;
1581 BranchInst::Create(ContBB, NullPtrBlock);
1583 // CI is no longer needed, remove it.
1584 CI->eraseFromParent();
1586 /// InsertedScalarizedLoads - As we process loads, if we can't immediately
1587 /// update all uses of the load, keep track of what scalarized loads are
1588 /// inserted for a given load.
1589 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
1590 InsertedScalarizedValues[GV] = FieldGlobals;
1592 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;
1594 // Okay, the malloc site is completely handled. All of the uses of GV are now
1595 // loads, and all uses of those loads are simple. Rewrite them to use loads
1596 // of the per-field globals instead.
1597 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
1598 Instruction *User = cast<Instruction>(*UI++);
1600 if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1601 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
1605 // Must be a store of null.
1606 StoreInst *SI = cast<StoreInst>(User);
1607 assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
1608 "Unexpected heap-sra user!");
1610 // Insert a store of null into each global.
1611 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1612 PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
1613 Constant *Null = Constant::getNullValue(PT->getElementType());
1614 new StoreInst(Null, FieldGlobals[i], SI);
1616 // Erase the original store.
1617 SI->eraseFromParent();
1620 // While we have PHIs that are interesting to rewrite, do it.
1621 while (!PHIsToRewrite.empty()) {
1622 PHINode *PN = PHIsToRewrite.back().first;
1623 unsigned FieldNo = PHIsToRewrite.back().second;
1624 PHIsToRewrite.pop_back();
1625 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
1626 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");
1628 // Add all the incoming values. This can materialize more phis.
1629 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1630 Value *InVal = PN->getIncomingValue(i);
1631 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
1632 PHIsToRewrite);
1633 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
1634 }
1635 }
1637 // Drop all inter-phi links and any loads that made it this far.
1638 for (DenseMap<Value*, std::vector<Value*> >::iterator
1639 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1640 I != E; ++I)
1641 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1642 PN->dropAllReferences();
1643 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1644 LI->dropAllReferences();
1647 // Delete all the phis and loads now that inter-references are dead.
1648 for (DenseMap<Value*, std::vector<Value*> >::iterator
1649 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1650 I != E; ++I)
1651 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1652 PN->eraseFromParent();
1653 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1654 LI->eraseFromParent();
1657 // The old global is now dead, remove it.
1658 GV->eraseFromParent();
1661 return cast<GlobalVariable>(FieldGlobals[0]);
1664 /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
1665 /// pointer global variable with a single value stored into it that is a malloc or
1666 /// cast of malloc.
1667 static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
1668 CallInst *CI,
1669 Type *AllocTy,
1670 AtomicOrdering Ordering,
1671 Module::global_iterator &GVI,
1672 DataLayout *TD,
1673 TargetLibraryInfo *TLI) {
1677 // If this is a malloc of an abstract type, don't touch it.
1678 if (!AllocTy->isSized())
1681 // We can't optimize this global unless all uses of it are *known* to be
1682 // of the malloc value, not of the null initializer value (consider a use
1683 // that compares the global's value against zero to see if the malloc has
1684 // been reached). To do this, we check to see if all uses of the global
1685 // would trap if the global were null: this proves that they must all
1686 // happen after the malloc.
1687 if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
1690 // We can't optimize this if the malloc itself is used in a complex way,
1691 // for example, being stored into multiple globals. This allows the
1692 // malloc to be stored into the specified global, loaded icmp'd, and
1693 // GEP'd. These are all things we could transform to using the global
1694 // for.
1695 SmallPtrSet<const PHINode*, 8> PHIs;
1696 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
1697 return false;
1699 // If we have a global that is only initialized with a fixed size malloc,
1700 // transform the program to use global memory instead of malloc'd memory.
1701 // This eliminates dynamic allocation, avoids an indirection accessing the
1702 // data, and exposes the resultant global to further GlobalOpt.
1703 // We cannot optimize the malloc if we cannot determine malloc array size.
1704 Value *NElems = getMallocArraySize(CI, TD, TLI, true);
1705 if (!NElems)
1706 return false;
1708 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1709 // Restrict this transformation to only working on small allocations
1710 // (2048 bytes currently), as we don't want to introduce a 16M global or
1711 // something.
1712 if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
1713 GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);
1714 return true;
1715 }
1717 // If the allocation is an array of structures, consider transforming this
1718 // into multiple malloc'd arrays, one for each field. This is basically
1719 // SRoA for malloc'd memory.
1721 if (Ordering != NotAtomic)
1724 // If this is an allocation of a fixed size array of structs, analyze as a
1725 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1726 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1727 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1728 AllocTy = AT->getElementType();
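// Heap SRoA below only applies when the allocated type is a struct; any other
// allocation type is left untouched.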
1730 StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1731 if (!AllocSTy)
1732 return false;
1734 // If the structure has an unreasonable number of fields, leave it
1735 // alone.
1736 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1737 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
1739 // If this is a fixed size array, transform the Malloc to be an alloc of
1740 // structs. malloc [100 x struct],1 -> malloc struct, 100
1741 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
1742 Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
1743 unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
1744 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1745 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1746 Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
1747 AllocSize, NumElements,
1748 0, CI->getName());
1749 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1750 CI->replaceAllUsesWith(Cast);
1751 CI->eraseFromParent();
1752 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
1753 CI = cast<CallInst>(BCI->getOperand(0));
1754 else
1755 CI = cast<CallInst>(Malloc);
1756 }
1758 GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true),
1759 TD, TLI);
1760 return true;
1761 }
1763 return false;
1764 }
1766 // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
1767 // that only one value (besides its initializer) is ever stored to the global.
1768 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1769 AtomicOrdering Ordering,
1770 Module::global_iterator &GVI,
1771 DataLayout *TD, TargetLibraryInfo *TLI) {
1772 // Ignore no-op GEPs and bitcasts.
1773 StoredOnceVal = StoredOnceVal->stripPointerCasts();
1775 // If we are dealing with a pointer global that is initialized to null and
1776 // only has one (non-null) value stored into it, then we can optimize any
1777 // users of the loaded value (often calls and loads) that would trap if the
1778 // loaded value is dynamically null.
1779 if (GV->getInitializer()->getType()->isPointerTy() &&
1780 GV->getInitializer()->isNullValue()) {
1781 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1782 if (GV->getInitializer()->getType() != SOVC->getType())
1783 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1785 // Optimize away any trapping uses of the loaded value.
1786 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
1787 return true;
1788 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
1789 Type *MallocType = getMallocAllocatedType(CI, TLI);
1790 if (MallocType &&
1791 TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
1792 TD, TLI))
1793 return true;
1794 }
1795 }
1797 return false;
1798 }
1800 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
1801 /// two values ever stored into GV are its initializer and OtherVal. See if we
1802 /// can shrink the global into a boolean and select between the two values
1803 /// whenever it is used. This exposes the values to other scalar optimizations.
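/// For example (illustrative), an internal global that only ever holds its
/// initializer or one other constant becomes an i1 flag; each load is rewritten
/// as a select of the two constants (or a zext when the values are 0 and 1).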
1804 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1805 Type *GVElType = GV->getType()->getElementType();
1807 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1808 // an FP value, pointer or vector, don't do this optimization because a select
1809 // between them is very expensive and unlikely to lead to later
1810 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1811 // where v1 and v2 both require constant pool loads, a big loss.
1812 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1813 GVElType->isFloatingPointTy() ||
1814 GVElType->isPointerTy() || GVElType->isVectorTy())
1817 // Walk the use list of the global seeing if all the uses are load or store.
1818 // If there is anything else, bail out.
1819 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
1820 const User *U = *I;
1821 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1822 return false;
1823 }
1825 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
1827 // Create the new global, initializing it to false.
1828 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1829 false,
1830 GlobalValue::InternalLinkage,
1831 ConstantInt::getFalse(GV->getContext()),
1832 GV->getName()+".b",
1833 GV->getThreadLocalMode());
1834 GV->getParent()->getGlobalList().insert(GV, NewGV);
1836 Constant *InitVal = GV->getInitializer();
1837 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1838 "No reason to shrink to bool!");
1840 // If initialized to zero and storing one into the global, we can use a cast
1841 // instead of a select to synthesize the desired value.
1842 bool IsOneZero = false;
1843 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
1844 IsOneZero = InitVal->isNullValue() && CI->isOne();
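// Rewrite every remaining use of the old global in terms of the new i1 global:
// stores become boolean stores, and loads become a select (or zext) of the bool.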
1846 while (!GV->use_empty()) {
1847 Instruction *UI = cast<Instruction>(GV->use_back());
1848 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1849 // Change the store into a boolean store.
1850 bool StoringOther = SI->getOperand(0) == OtherVal;
1851 // Only do this if we weren't storing a loaded value.
1852 Value *StoreVal;
1853 if (StoringOther || SI->getOperand(0) == InitVal)
1854 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1855 StoringOther);
1856 else {
1857 // Otherwise, we are storing a previously loaded copy. To do this,
1858 // change the copy from copying the original value to just copying the
1859 // bool.
1860 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1862 // If we've already replaced the input, StoredVal will be a cast or
1863 // select instruction. If not, it will be a load of the original
1864 // global.
1865 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1866 assert(LI->getOperand(0) == GV && "Not a copy!");
1867 // Insert a new load, to preserve the saved value.
1868 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1869 LI->getOrdering(), LI->getSynchScope(), LI);
1870 } else {
1871 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1872 "This is not a form that we understand!");
1873 StoreVal = StoredVal->getOperand(0);
1874 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1877 new StoreInst(StoreVal, NewGV, false, 0,
1878 SI->getOrdering(), SI->getSynchScope(), SI);
1880 // Change the load into a load of bool then a select.
1881 LoadInst *LI = cast<LoadInst>(UI);
1882 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1883 LI->getOrdering(), LI->getSynchScope(), LI);
1884 Value *NSI;
1885 if (IsOneZero)
1886 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1887 else
1888 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1889 NSI->takeName(LI);
1890 LI->replaceAllUsesWith(NSI);
1891 }
1892 UI->eraseFromParent();
1895 GV->eraseFromParent();
1900 /// ProcessGlobal - Analyze the specified global variable and optimize it if
1901 /// possible. If we make a change, return true.
1902 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
1903 Module::global_iterator &GVI) {
1904 if (!GV->isDiscardableIfUnused())
1907 // Do more involved optimizations if the global is internal.
1908 GV->removeDeadConstantUsers();
1910 if (GV->use_empty()) {
1911 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
1912 GV->eraseFromParent();
1913 ++NumDeleted;
1914 return true;
1915 }
1917 if (!GV->hasLocalLinkage())
1918 return false;
1920 SmallPtrSet<const PHINode*, 16> PHIUsers;
1921 GlobalStatus GS;
1923 if (AnalyzeGlobal(GV, GS, PHIUsers))
1924 return false;
1926 if (!GS.isCompared && !GV->hasUnnamedAddr()) {
1927 GV->setUnnamedAddr(true);
1928 ++NumUnnamed;
1929 }
1931 if (GV->isConstant() || !GV->hasInitializer())
1932 return false;
1934 return ProcessInternalGlobal(GV, GVI, PHIUsers, GS);
1935 }
1937 /// ProcessInternalGlobal - Analyze the specified global variable and optimize
1938 /// it if possible. If we make a change, return true.
1939 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
1940 Module::global_iterator &GVI,
1941 const SmallPtrSet<const PHINode*, 16> &PHIUsers,
1942 const GlobalStatus &GS) {
1943 // If this is a first class global and has only one accessing function
1944 // and this function is main (which we know is not recursive we can make
1945 // this global a local variable) we replace the global with a local alloca
1946 // in this function.
1948 // NOTE: It doesn't make sense to promote non single-value types since we
1949 // are just replacing static memory to stack memory.
1951 // If the global is in different address space, don't bring it to stack.
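// For example (illustrative), an internal i32 global touched only from main()
// becomes an alloca in main's entry block, seeded with the old initializer.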
1952 if (!GS.HasMultipleAccessingFunctions &&
1953 GS.AccessingFunction && !GS.HasNonInstructionUser &&
1954 GV->getType()->getElementType()->isSingleValueType() &&
1955 GS.AccessingFunction->getName() == "main" &&
1956 GS.AccessingFunction->hasExternalLinkage() &&
1957 GV->getType()->getAddressSpace() == 0) {
1958 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
1959 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1960 ->getEntryBlock().begin());
1961 Type *ElemTy = GV->getType()->getElementType();
1962 // FIXME: Pass Global's alignment when globals have alignment
1963 AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
1964 if (!isa<UndefValue>(GV->getInitializer()))
1965 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1967 GV->replaceAllUsesWith(Alloca);
1968 GV->eraseFromParent();
1969 ++NumLocalized;
1970 return true;
1971 }
1973 // If the global is never loaded (but may be stored to), it is dead.
1974 // Delete it now.
1975 if (!GS.isLoaded) {
1976 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
1977 bool Changed;
1979 if (isLeakCheckerRoot(GV)) {
1980 // Delete any constant stores to the global.
1981 Changed = CleanupPointerRootUsers(GV, TLI);
1982 } else {
1983 // Delete any stores we can find to the global. We may not be able to
1984 // make it completely dead though.
1985 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
1986 }
1988 // If the global is dead now, delete it.
1989 if (GV->use_empty()) {
1990 GV->eraseFromParent();
1991 ++NumDeleted;
1992 Changed = true;
1993 }
1994 return Changed;
1996 } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
1997 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV);
1998 GV->setConstant(true);
2000 // Clean up any obviously simplifiable users now.
2001 CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
2003 // If the global is dead now, just nuke it.
2004 if (GV->use_empty()) {
2005 DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
2006 << "all users and delete global!\n");
2007 GV->eraseFromParent();
2013 } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
2014 if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
2015 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
2016 GVI = FirstNewGV; // Don't skip the newly produced globals!
2017 return true;
2018 }
2019 } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
2020 // If the initial value for the global was an undef value, and if only
2021 // one other value was stored into it, we can just change the
2022 // initializer to be the stored value, then delete all stores to the
2023 // global. This allows us to mark it constant.
2024 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
2025 if (isa<UndefValue>(GV->getInitializer())) {
2026 // Change the initial value here.
2027 GV->setInitializer(SOVConstant);
2029 // Clean up any obviously simplifiable users now.
2030 CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
2032 if (GV->use_empty()) {
2033 DEBUG(dbgs() << " *** Substituting initializer allowed us to "
2034 << "simplify all users and delete global!\n");
2035 GV->eraseFromParent();
2036 ++NumDeleted;
2037 } else {
2038 GVI = GV;
2039 }
2040 ++NumSubstitute;
2041 return true;
2042 }
2044 // Try to optimize globals based on the knowledge that only one value
2045 // (besides its initializer) is ever stored to the global.
2046 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
2047 TD, TLI))
2048 return true;
2050 // Otherwise, if the global was not a boolean, we can shrink it to be a
2051 // boolean.
2052 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
2053 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
2054 ++NumShrunkToBool;
2055 return true;
2056 }
2057 }
2059 return false;
2060 }
2062 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
2063 /// function, changing them to FastCC.
2064 static void ChangeCalleesToFastCall(Function *F) {
2065 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
2066 if (isa<BlockAddress>(*UI))
2068 CallSite User(cast<Instruction>(*UI));
2069 User.setCallingConv(CallingConv::Fast);
2073 static AttrListPtr StripNest(LLVMContext &C, const AttrListPtr &Attrs) {
2074 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
2075 if (!Attrs.getSlot(i).Attrs.hasAttribute(Attributes::Nest))
2078 // There can be only one.
2079 return Attrs.removeAttr(C, Attrs.getSlot(i).Index,
2080 Attributes::get(C, Attributes::Nest));
2086 static void RemoveNestAttribute(Function *F) {
2087 F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
2088 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
2089 if (isa<BlockAddress>(*UI))
2091 CallSite User(cast<Instruction>(*UI));
2092 User.setAttributes(StripNest(F->getContext(), User.getAttributes()));
2096 bool GlobalOpt::OptimizeFunctions(Module &M) {
2097 bool Changed = false;
2098 // Optimize functions.
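// This internalizes unnamed definitions, deletes trivially dead bodies,
// promotes non-address-taken local functions to the fast calling convention,
// and strips 'nest' attributes that are no longer needed.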
2099 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2100 Function *F = FI++;
2101 // Functions without names cannot be referenced outside this module.
2102 if (!F->hasName() && !F->isDeclaration())
2103 F->setLinkage(GlobalValue::InternalLinkage);
2104 F->removeDeadConstantUsers();
2105 if (F->isDefTriviallyDead()) {
2106 F->eraseFromParent();
2107 Changed = true;
2108 ++NumFnDeleted;
2109 } else if (F->hasLocalLinkage()) {
2110 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
2111 !F->hasAddressTaken()) {
2112 // If this function has C calling conventions, is not a varargs
2113 // function, and is only called directly, promote it to use the Fast
2114 // calling convention.
2115 F->setCallingConv(CallingConv::Fast);
2116 ChangeCalleesToFastCall(F);
2117 ++NumFastCallFns;
2118 Changed = true;
2119 }
2121 if (F->getAttributes().hasAttrSomewhere(Attributes::Nest) &&
2122 !F->hasAddressTaken()) {
2123 // The function is not used by a trampoline intrinsic, so it is safe
2124 // to remove the 'nest' attribute.
2125 RemoveNestAttribute(F);
2126 ++NumNestRemoved;
2127 Changed = true;
2128 }
2129 }
2130 }
2131 return Changed;
2132 }
2134 bool GlobalOpt::OptimizeGlobalVars(Module &M) {
2135 bool Changed = false;
2136 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
2137 GVI != E; ) {
2138 GlobalVariable *GV = GVI++;
2139 // Global variables without names cannot be referenced outside this module.
2140 if (!GV->hasName() && !GV->isDeclaration())
2141 GV->setLinkage(GlobalValue::InternalLinkage);
2142 // Simplify the initializer.
2143 if (GV->hasInitializer())
2144 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
2145 Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
2146 if (New && New != CE)
2147 GV->setInitializer(New);
2148 }
2150 Changed |= ProcessGlobal(GV, GVI);
2151 }
2152 return Changed;
2153 }
2155 /// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all
2156 /// initializers have an init priority of 65535.
2157 GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
2158 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
2159 if (GV == 0) return 0;
2161 // Verify that the initializer is simple enough for us to handle. We are
2162 // only allowed to optimize the initializer if it is unique.
2163 if (!GV->hasUniqueInitializer()) return 0;
2165 if (isa<ConstantAggregateZero>(GV->getInitializer()))
2166 return GV;
2167 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
2169 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
2170 if (isa<ConstantAggregateZero>(*i))
2171 continue;
2172 ConstantStruct *CS = cast<ConstantStruct>(*i);
2173 if (isa<ConstantPointerNull>(CS->getOperand(1)))
2174 continue;
2176 // Must have a function or null ptr.
2177 if (!isa<Function>(CS->getOperand(1)))
2178 return 0;
2180 // Init priority must be standard.
2181 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
2182 if (CI->getZExtValue() != 65535)
2183 return 0;
2184 }
2186 return GV;
2187 }
2189 /// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand,
2190 /// return a list of the functions and null terminator as a vector.
2191 static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
2192 if (GV->getInitializer()->isNullValue())
2193 return std::vector<Function*>();
2194 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
2195 std::vector<Function*> Result;
2196 Result.reserve(CA->getNumOperands());
2197 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
2198 ConstantStruct *CS = cast<ConstantStruct>(*i);
2199 Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
2204 /// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
2205 /// specified array, returning the new global to use.
2206 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
2207 const std::vector<Function*> &Ctors) {
2208 // If we made a change, reassemble the initializer list.
2209 Constant *CSVals[2];
2210 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
2211 CSVals[1] = 0;
2213 StructType *StructTy =
2214 cast<StructType>(
2215 cast<ArrayType>(GCL->getType()->getElementType())->getElementType());
2217 // Create the new init list.
2218 std::vector<Constant*> CAList;
2219 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
2220 if (Ctors[i]) {
2221 CSVals[1] = Ctors[i];
2222 } else {
2223 Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
2224 false);
2225 PointerType *PFTy = PointerType::getUnqual(FTy);
2226 CSVals[1] = Constant::getNullValue(PFTy);
2227 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
2228 0x7fffffff);
2229 }
2230 CAList.push_back(ConstantStruct::get(StructTy, CSVals));
2231 }
2233 // Create the array initializer.
2234 Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
2235 CAList.size()), CAList);
2237 // If we didn't change the number of elements, don't create a new GV.
2238 if (CA->getType() == GCL->getInitializer()->getType()) {
2239 GCL->setInitializer(CA);
2243 // Create the new global and insert it next to the existing list.
2244 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
2245 GCL->getLinkage(), CA, "",
2246 GCL->getThreadLocalMode());
2247 GCL->getParent()->getGlobalList().insert(GCL, NGV);
2250 // Nuke the old list, replacing any uses with the new one.
2251 if (!GCL->use_empty()) {
2252 Constant *V = NGV;
2253 if (V->getType() != GCL->getType())
2254 V = ConstantExpr::getBitCast(V, GCL->getType());
2255 GCL->replaceAllUsesWith(V);
2256 }
2257 GCL->eraseFromParent();
2259 return NGV;
2260 }
2266 static bool
2267 isSimpleEnoughValueToCommit(Constant *C,
2268 SmallPtrSet<Constant*, 8> &SimpleConstants,
2269 const DataLayout *TD);
2272 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be
2273 /// handled by the code generator. We don't want to generate something like:
2274 /// void *X = &X/42;
2275 /// because the code generator doesn't have a relocation that can handle that.
2277 /// This function should be called if C was not found (but just got inserted)
2278 /// in SimpleConstants to avoid having to rescan the same constants all the
2280 static bool isSimpleEnoughValueToCommitHelper(Constant *C,
2281 SmallPtrSet<Constant*, 8> &SimpleConstants,
2282 const DataLayout *TD) {
2283 // Simple integer, undef, constant aggregate zero, global addresses, etc are
2284 // all supported.
2285 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
2286 isa<GlobalValue>(C))
2287 return true;
2289 // Aggregate values are safe if all their elements are.
2290 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
2291 isa<ConstantVector>(C)) {
2292 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
2293 Constant *Op = cast<Constant>(C->getOperand(i));
2294 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
2295 return false;
2296 }
2297 return true;
2298 }
2300 // We don't know exactly what relocations are allowed in constant expressions,
2301 // so we allow &global+constantoffset, which is safe and uniformly supported
2302 // across targets.
2303 ConstantExpr *CE = cast<ConstantExpr>(C);
2304 switch (CE->getOpcode()) {
2305 case Instruction::BitCast:
2306 // Bitcast is fine if the casted value is fine.
2307 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
2309 case Instruction::IntToPtr:
2310 case Instruction::PtrToInt:
2311 // int <=> ptr is fine if the int type is the same size as the
2312 // pointer type.
2313 if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
2314 TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
2315 return false;
2316 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
2318 // GEP is fine if it is simple + constant offset.
2319 case Instruction::GetElementPtr:
2320 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
2321 if (!isa<ConstantInt>(CE->getOperand(i)))
2323 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
2325 case Instruction::Add:
2326 // We allow simple+cst.
2327 if (!isa<ConstantInt>(CE->getOperand(1)))
2328 return false;
2329 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
2330 }
2331 return false;
2332 }
2334 static bool
2335 isSimpleEnoughValueToCommit(Constant *C,
2336 SmallPtrSet<Constant*, 8> &SimpleConstants,
2337 const DataLayout *TD) {
2338 // If we already checked this constant, we win.
2339 if (!SimpleConstants.insert(C)) return true;
2340 // Check the constant.
2341 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
2345 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple
2346 /// enough for us to understand. In particular, if it is a cast to anything
2347 /// other than from one pointer type to another pointer type, we punt.
2348 /// We basically just support direct accesses to globals and GEP's of
2349 /// globals. This should be kept up to date with CommitValueTo.
2350 static bool isSimpleEnoughPointerToCommit(Constant *C) {
2351 // Conservatively, avoid aggregate types. This is because we don't
2352 // want to worry about them partially overlapping other stores.
2353 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
2356 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
2357 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2358 // external globals.
2359 return GV->hasUniqueInitializer();
2361 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
2362 // Handle a constantexpr gep.
2363 if (CE->getOpcode() == Instruction::GetElementPtr &&
2364 isa<GlobalVariable>(CE->getOperand(0)) &&
2365 cast<GEPOperator>(CE)->isInBounds()) {
2366 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2367 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2368 // external globals.
2369 if (!GV->hasUniqueInitializer())
2372 // The first index must be zero.
2373 ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin()));
2374 if (!CI || !CI->isZero()) return false;
2376 // The remaining indices must be compile-time known integers within the
2377 // notional bounds of the corresponding static array types.
2378 if (!CE->isGEPWithNoNotionalOverIndexing())
2381 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2383 // A constantexpr bitcast from a pointer to another pointer is a no-op,
2384 // and we know how to evaluate it by moving the bitcast from the pointer
2385 // operand to the value operand.
2386 } else if (CE->getOpcode() == Instruction::BitCast &&
2387 isa<GlobalVariable>(CE->getOperand(0))) {
2388 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2389 // external globals.
2390 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
2391 }
2392 }
2394 return false;
2395 }
2397 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global
2398 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it.
2399 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
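/// For instance (illustrative only), storing V through a constant GEP of the
/// form getelementptr(@G, 0, 1, 2) recurses into field 1 and then element 2,
/// rebuilding the enclosing struct/array constants on the way back out.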
2400 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2401 ConstantExpr *Addr, unsigned OpNo) {
2402 // Base case of the recursion.
2403 if (OpNo == Addr->getNumOperands()) {
2404 assert(Val->getType() == Init->getType() && "Type mismatch!");
2408 SmallVector<Constant*, 32> Elts;
2409 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2410 // Break up the constant into its elements.
2411 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2412 Elts.push_back(Init->getAggregateElement(i));
2414 // Replace the element that we are supposed to.
2415 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2416 unsigned Idx = CU->getZExtValue();
2417 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2418 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2420 // Return the modified struct.
2421 return ConstantStruct::get(STy, Elts);
2424 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2425 SequentialType *InitTy = cast<SequentialType>(Init->getType());
2427 uint64_t NumElts;
2428 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
2429 NumElts = ATy->getNumElements();
2430 else
2431 NumElts = InitTy->getVectorNumElements();
2433 // Break up the array into elements.
2434 for (uint64_t i = 0, e = NumElts; i != e; ++i)
2435 Elts.push_back(Init->getAggregateElement(i));
2437 assert(CI->getZExtValue() < NumElts);
2438 Elts[CI->getZExtValue()] =
2439 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2441 if (Init->getType()->isArrayTy())
2442 return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2443 return ConstantVector::get(Elts);
2446 /// CommitValueTo - We have decided that Addr (which satisfies the predicate
2447 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2448 static void CommitValueTo(Constant *Val, Constant *Addr) {
2449 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2450 assert(GV->hasInitializer());
2451 GV->setInitializer(Val);
2455 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2456 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2457 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2462 /// Evaluator - This class evaluates LLVM IR, producing the Constant
2463 /// representing each SSA instruction. Changes to global variables are stored
2464 /// in a mapping that can be iterated over after the evaluation is complete.
2465 /// Once an evaluation call fails, the evaluation object should not be reused.
2466 class Evaluator {
2467 public:
2468 Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)
2469 : TD(TD), TLI(TLI) {
2470 ValueStack.push_back(new DenseMap<Value*, Constant*>);
2471 }
2473 ~Evaluator() {
2474 DeleteContainerPointers(ValueStack);
2475 while (!AllocaTmps.empty()) {
2476 GlobalVariable *Tmp = AllocaTmps.back();
2477 AllocaTmps.pop_back();
2479 // If there are still users of the alloca, the program is doing something
2480 // silly, e.g. storing the address of the alloca somewhere and using it
2481 // later. Since this is undefined, we'll just make it be null.
2482 if (!Tmp->use_empty())
2483 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
2488 /// EvaluateFunction - Evaluate a call to function F, returning true if
2489 /// successful, false if we can't evaluate it. ActualArgs contains the formal
2490 /// arguments for the function.
2491 bool EvaluateFunction(Function *F, Constant *&RetVal,
2492 const SmallVectorImpl<Constant*> &ActualArgs);
2494 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
2495 /// successful, false if we can't evaluate it. NewBB returns the next BB that
2496 /// control flows into, or null upon return.
2497 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
2499 Constant *getVal(Value *V) {
2500 if (Constant *CV = dyn_cast<Constant>(V)) return CV;
2501 Constant *R = ValueStack.back()->lookup(V);
2502 assert(R && "Reference to an uncomputed value!");
2506 void setVal(Value *V, Constant *C) {
2507 ValueStack.back()->operator[](V) = C;
2510 const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
2511 return MutatedMemory;
2514 const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const {
2519 Constant *ComputeLoadResult(Constant *P);
2521 /// ValueStack - As we compute SSA register values, we store their contents
2522 /// here. The back of the vector contains the current function and the stack
2523 /// contains the values in the calling frames.
2524 SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack;
2526 /// CallStack - This is used to detect recursion. In pathological situations
2527 /// we could hit exponential behavior, but at least there is nothing
2529 SmallVector<Function*, 4> CallStack;
2531 /// MutatedMemory - For each store we execute, we update this map. Loads
2532 /// check this to get the most up-to-date value. If evaluation is successful,
2533 /// this state is committed to the process.
2534 DenseMap<Constant*, Constant*> MutatedMemory;
2536 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
2537 /// to represent its body. This vector is needed so we can delete the
2538 /// temporary globals when we are done.
2539 SmallVector<GlobalVariable*, 32> AllocaTmps;
2541 /// Invariants - These global variables have been marked invariant by the
2542 /// static constructor.
2543 SmallPtrSet<GlobalVariable*, 8> Invariants;
2545 /// SimpleConstants - These are constants we have checked and know to be
2546 /// simple enough to live in a static initializer of a global.
2547 SmallPtrSet<Constant*, 8> SimpleConstants;
2549 const DataLayout *TD;
2550 const TargetLibraryInfo *TLI;
2553 } // anonymous namespace
2555 /// ComputeLoadResult - Return the value that would be computed by a load from
2556 /// P after the stores reflected by 'memory' have been performed. If we can't
2557 /// decide, return null.
2558 Constant *Evaluator::ComputeLoadResult(Constant *P) {
2559 // If this memory location has been recently stored, use the stored value: it
2560 // is the most up-to-date.
2561 DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P);
2562 if (I != MutatedMemory.end()) return I->second;
2565 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
2566 if (GV->hasDefinitiveInitializer())
2567 return GV->getInitializer();
2571 // Handle a constantexpr getelementptr.
2572 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
2573 if (CE->getOpcode() == Instruction::GetElementPtr &&
2574 isa<GlobalVariable>(CE->getOperand(0))) {
2575 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2576 if (GV->hasDefinitiveInitializer())
2577 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2580 return 0; // don't know how to evaluate.
2583 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
2584 /// successful, false if we can't evaluate it. NewBB returns the next BB that
2585 /// control flows into, or null upon return.
2586 bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
2587 BasicBlock *&NextBB) {
2588 // This is the main evaluation loop.
2589 while (1) {
2590 Constant *InstResult = 0;
2592 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
2593 if (!SI->isSimple()) return false; // no volatile/atomic accesses.
2594 Constant *Ptr = getVal(SI->getOperand(1));
2595 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2596 Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
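// Folding the pointer first lets the simple-pointer check below see a plain
// global or GEP-of-global rather than an arbitrary constant expression.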
2597 if (!isSimpleEnoughPointerToCommit(Ptr))
2598 // If this is too complex for us to commit, reject it.
2601 Constant *Val = getVal(SI->getOperand(0));
2603 // If this might be too difficult for the backend to handle (e.g. the addr
2604 // of one global variable divided by another) then we can't commit it.
2605 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD))
2608 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2609 if (CE->getOpcode() == Instruction::BitCast) {
2610 // If we're evaluating a store through a bitcast, then we need
2611 // to pull the bitcast off the pointer type and push it onto the
2613 Ptr = CE->getOperand(0);
2615 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();
2617 // In order to push the bitcast onto the stored value, a bitcast
2618 // from NewTy to Val's type must be legal. If it's not, we can try
2619 // introspecting NewTy to find a legal conversion.
2620 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
2621 // If NewTy is a struct, we can convert the pointer to the struct
2622 // into a pointer to its first member.
2623 // FIXME: This could be extended to support arrays as well.
2624 if (StructType *STy = dyn_cast<StructType>(NewTy)) {
2625 NewTy = STy->getTypeAtIndex(0U);
2627 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
2628 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
2629 Constant * const IdxList[] = {IdxZero, IdxZero};
2631 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
2632 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2633 Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
2635 // If we can't improve the situation by introspecting NewTy,
2636 // we have to give up.
2637 } else {
2638 return false;
2639 }
2640 }
2642 // If we found compatible types, go ahead and push the bitcast
2643 // onto the stored value.
2644 Val = ConstantExpr::getBitCast(Val, NewTy);
2645 }
2647 MutatedMemory[Ptr] = Val;
2648 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
2649 InstResult = ConstantExpr::get(BO->getOpcode(),
2650 getVal(BO->getOperand(0)),
2651 getVal(BO->getOperand(1)));
2652 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
2653 InstResult = ConstantExpr::getCompare(CI->getPredicate(),
2654 getVal(CI->getOperand(0)),
2655 getVal(CI->getOperand(1)));
2656 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
2657 InstResult = ConstantExpr::getCast(CI->getOpcode(),
2658 getVal(CI->getOperand(0)),
2660 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
2661 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
2662 getVal(SI->getOperand(1)),
2663 getVal(SI->getOperand(2)));
2664 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
2665 Constant *P = getVal(GEP->getOperand(0));
2666 SmallVector<Constant*, 8> GEPOps;
2667 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
2668 i != e; ++i)
2669 GEPOps.push_back(getVal(*i));
2670 InstResult =
2671 ConstantExpr::getGetElementPtr(P, GEPOps,
2672 cast<GEPOperator>(GEP)->isInBounds());
2673 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
2674 if (!LI->isSimple()) return false; // no volatile/atomic accesses.
2675 Constant *Ptr = getVal(LI->getOperand(0));
2676 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2677 Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
2678 InstResult = ComputeLoadResult(Ptr);
2679 if (InstResult == 0) return false; // Could not evaluate load.
2680 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
2681 if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
2682 Type *Ty = AI->getType()->getElementType();
2683 AllocaTmps.push_back(new GlobalVariable(Ty, false,
2684 GlobalValue::InternalLinkage,
2685 UndefValue::get(Ty),
2687 InstResult = AllocaTmps.back();
2688 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
2689 CallSite CS(CurInst);
2691 // Debug info can safely be ignored here.
2692 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
2693 ++CurInst;
2694 continue;
2695 }
2697 // Cannot handle inline asm.
2698 if (isa<InlineAsm>(CS.getCalledValue())) return false;
2700 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
2701 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
2702 if (MSI->isVolatile()) return false;
2703 Constant *Ptr = getVal(MSI->getDest());
2704 Constant *Val = getVal(MSI->getValue());
2705 Constant *DestVal = ComputeLoadResult(getVal(Ptr));
2706 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
2707 // This memset is a no-op.
2713 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
2714 II->getIntrinsicID() == Intrinsic::lifetime_end) {
2719 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2720 // We don't insert an entry into Values, as it doesn't have a
2721 // meaningful return value.
2722 if (!II->use_empty())
2724 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
2725 Value *PtrArg = getVal(II->getArgOperand(1));
2726 Value *Ptr = PtrArg->stripPointerCasts();
2727 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
2728 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
2729 if (!Size->isAllOnesValue() &&
2730 Size->getValue().getLimitedValue() >=
2731 TD->getTypeStoreSize(ElemTy))
2732 Invariants.insert(GV);
2734 // Continue even if we do nothing.
2741 // Resolve function pointers.
2742 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
2743 if (!Callee || Callee->mayBeOverridden())
2744 return false; // Cannot resolve.
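// Evaluate each actual argument down to a Constant before descending into the
// callee.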
2746 SmallVector<Constant*, 8> Formals;
2747 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i)
2748 Formals.push_back(getVal(*i));
2750 if (Callee->isDeclaration()) {
2751 // If this is a function we can constant fold, do it.
2752 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
2753 InstResult = C;
2754 } else {
2755 return false;
2756 }
2757 } else {
2758 if (Callee->getFunctionType()->isVarArg())
2759 return false;
2761 Constant *RetVal;
2762 // Execute the call, if successful, use the return value.
2763 ValueStack.push_back(new DenseMap<Value*, Constant*>);
2764 if (!EvaluateFunction(Callee, RetVal, Formals))
2765 return false; // Cannot evaluate.
2766 delete ValueStack.pop_back_val();
2767 InstResult = RetVal;
2768 }
2769 } else if (isa<TerminatorInst>(CurInst)) {
2770 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
2771 if (BI->isUnconditional()) {
2772 NextBB = BI->getSuccessor(0);
2773 } else {
2774 ConstantInt *Cond =
2775 dyn_cast<ConstantInt>(getVal(BI->getCondition()));
2776 if (!Cond) return false; // Cannot determine.
2778 NextBB = BI->getSuccessor(!Cond->getZExtValue());
2779 }
2780 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
2781 ConstantInt *Val =
2782 dyn_cast<ConstantInt>(getVal(SI->getCondition()));
2783 if (!Val) return false; // Cannot determine.
2784 NextBB = SI->findCaseValue(Val).getCaseSuccessor();
2785 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
2786 Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
2787 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
2788 NextBB = BA->getBasicBlock();
2790 return false; // Cannot determine.
2791 } else if (isa<ReturnInst>(CurInst)) {
2792 NextBB = 0;
2793 } else {
2794 // invoke, unwind, resume, unreachable.
2795 return false; // Cannot handle this terminator.
2796 }
2798 // We succeeded at evaluating this block!
2799 return true;
2800 } else {
2801 // Did not know how to evaluate this!
2802 return false;
2803 }
2805 if (!CurInst->use_empty()) {
2806 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
2807 InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
2809 setVal(CurInst, InstResult);
2812 // If we just processed an invoke, we finished evaluating the block.
2813 if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
2814 NextBB = II->getNormalDest();
2815 return true;
2816 }
2818 // Advance program counter.
2819 ++CurInst;
2820 }
2821 }
2823 /// EvaluateFunction - Evaluate a call to function F, returning true if
2824 /// successful, false if we can't evaluate it. ActualArgs contains the formal
2825 /// arguments for the function.
2826 bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
2827 const SmallVectorImpl<Constant*> &ActualArgs) {
2828 // Check to see if this function is already executing (recursion). If so,
2829 // bail out. TODO: we might want to accept limited recursion.
2830 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
2833 CallStack.push_back(F);
2835 // Initialize arguments to the incoming values specified.
2836 unsigned ArgNo = 0;
2837 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2838 ++AI, ++ArgNo)
2839 setVal(AI, ActualArgs[ArgNo]);
2841 // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
2842 // we can only evaluate any one basic block at most once. This set keeps
2843 // track of what we have executed so we can detect recursive cases etc.
2844 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
2846 // CurBB - The current basic block we're evaluating.
2847 BasicBlock *CurBB = F->begin();
2849 BasicBlock::iterator CurInst = CurBB->begin();
2851 while (1) {
2852 BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings.
2853 if (!EvaluateBlock(CurInst, NextBB))
2854 return false;
2856 if (NextBB == 0) {
2857 // Successfully running until there's no next block means that we found
2858 // the return. Fill in the return value and pop the call stack.
2859 ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
2860 if (RI->getNumOperands())
2861 RetVal = getVal(RI->getOperand(0));
2862 CallStack.pop_back();
2866 // Okay, we succeeded in evaluating this control flow. See if we have
2867 // executed the new block before. If so, we have a looping function,
2868 // which we cannot evaluate in reasonable time.
2869 if (!ExecutedBlocks.insert(NextBB))
2870 return false; // looped!
2872 // Okay, we have never been in this block before. Check to see if there
2873 // are any PHI nodes. If so, evaluate them with information about where
2874 // we came from.
2875 PHINode *PN = 0;
2876 for (CurInst = NextBB->begin();
2877 (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
2878 setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
2880 // Advance to the next block.
2885 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if
2886 /// we can. Return true if we can, false otherwise.
2887 static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
2888 const TargetLibraryInfo *TLI) {
2889 // Call the function.
2890 Evaluator Eval(TD, TLI);
2891 Constant *RetValDummy;
2892 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2893 SmallVector<Constant*, 0>());
2895 if (EvalSuccess) {
2896 // We succeeded at evaluation: commit the result.
2897 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2898 << F->getName() << "' to " << Eval.getMutatedMemory().size()
2899 << " stores.\n");
2900 for (DenseMap<Constant*, Constant*>::const_iterator I =
2901 Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
2902 I != E; ++I)
2903 CommitValueTo(I->second, I->first);
2904 for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I =
2905 Eval.getInvariants().begin(), E = Eval.getInvariants().end();
2906 I != E; ++I)
2907 (*I)->setConstant(true);
2908 }
2910 return EvalSuccess;
2911 }
2913 /// OptimizeGlobalCtorsList - Simplify and evaluation global ctors if possible.
2914 /// Return true if anything changed.
2915 bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
2916 std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
2917 bool MadeChange = false;
2918 if (Ctors.empty()) return false;
2920 // Loop over global ctors, optimizing them when we can.
2921 for (unsigned i = 0; i != Ctors.size(); ++i) {
2922 Function *F = Ctors[i];
2923 // Found a null terminator in the middle of the list, prune off the rest of
2924 // the list.
2925 if (F == 0) {
2926 if (i != Ctors.size()-1) {
2927 Ctors.resize(i+1);
2928 MadeChange = true;
2929 }
2930 break;
2931 }
2933 // We cannot simplify external ctor functions.
2934 if (F->empty()) continue;
2936 // If we can evaluate the ctor at compile time, do.
2937 if (EvaluateStaticConstructor(F, TD, TLI)) {
2938 Ctors.erase(Ctors.begin()+i);
2939 MadeChange = true;
2940 --i;
2941 ++NumCtorsEvaluated;
2942 continue;
2943 }
2944 }
2946 if (!MadeChange) return false;
2948 GCL = InstallGlobalCtors(GCL, Ctors);
2952 bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
2953 bool Changed = false;
2955 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
2957 Module::alias_iterator J = I++;
2958 // Aliases without names cannot be referenced outside this module.
2959 if (!J->hasName() && !J->isDeclaration())
2960 J->setLinkage(GlobalValue::InternalLinkage);
2961 // If the aliasee may change at link time, nothing can be done - bail out.
2962 if (J->mayBeOverridden())
2965 Constant *Aliasee = J->getAliasee();
2966 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2967 Target->removeDeadConstantUsers();
2968 bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse();
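// Note whether the alias is the only user of its aliasee; this is what makes
// it safe further down to fold the two together and transfer the alias's name
// and attributes.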
2970 // Make all users of the alias use the aliasee instead.
2971 if (!J->use_empty()) {
2972 J->replaceAllUsesWith(Aliasee);
2973 ++NumAliasesResolved;
2977 // If the alias is externally visible, we may still be able to simplify it.
2978 if (!J->hasLocalLinkage()) {
2979 // If the aliasee has internal linkage, give it the name and linkage
2980 // of the alias, and delete the alias. This turns:
2981 // define internal ... @f(...)
2982 // @a = alias ... @f
2984 // define ... @a(...)
2985 if (!Target->hasLocalLinkage())
2988 // Do not perform the transform if multiple aliases potentially target the
2989 // aliasee. This check also ensures that it is safe to replace the section
2990 // and other attributes of the aliasee with those of the alias.
2991 if (!hasOneUse)
2992 continue;
2993 }
2994 // Give the aliasee the name, linkage and other attributes of the alias.
2995 Target->takeName(J);
2996 Target->setLinkage(J->getLinkage());
2997 Target->GlobalValue::copyAttributesFrom(J);
3000 // Delete the alias.
3001 M.getAliasList().erase(J);
3002 ++NumAliasesRemoved;
3009 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
3010 if (!TLI->has(LibFunc::cxa_atexit))
3011 return 0;
3013 Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
3015 if (!Fn)
3016 return 0;
3018 FunctionType *FTy = Fn->getFunctionType();
3020 // Checking that the function has the right return type, the right number of
3021 // parameters and that they all have pointer types should be enough.
3022 if (!FTy->getReturnType()->isIntegerTy() ||
3023 FTy->getNumParams() != 3 ||
3024 !FTy->getParamType(0)->isPointerTy() ||
3025 !FTy->getParamType(1)->isPointerTy() ||
3026 !FTy->getParamType(2)->isPointerTy())
3032 /// cxxDtorIsEmpty - Returns whether the given function is an empty C++
3033 /// destructor and can therefore be eliminated.
3034 /// Note that we assume that other optimization passes have already simplified
3035 /// the code so we only look for a function with a single basic block, where
3036 /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
3037 /// other side-effect free instructions.
3038 static bool cxxDtorIsEmpty(const Function &Fn,
3039 SmallPtrSet<const Function *, 8> &CalledFunctions) {
3040 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
3041 // nounwind, but that doesn't seem worth doing.
3042 if (Fn.isDeclaration())
3045 if (++Fn.begin() != Fn.end())
3048 const BasicBlock &EntryBlock = Fn.getEntryBlock();
3049 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
3051 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
3052 // Ignore debug intrinsics.
3053 if (isa<DbgInfoIntrinsic>(CI))
3054 continue;
3056 const Function *CalledFn = CI->getCalledFunction();
3058 if (!CalledFn)
3059 return false;
3061 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
3063 // Don't treat recursive functions as empty.
3064 if (!NewCalledFunctions.insert(CalledFn))
3067 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
3069 } else if (isa<ReturnInst>(*I))
3070 return true; // We're done.
3071 else if (I->mayHaveSideEffects())
3072 return false; // Destructor with side effects, bail.
3078 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
3079 /// Itanium C++ ABI p3.3.5:
3081 /// After constructing a global (or local static) object, that will require
3082 /// destruction on exit, a termination function is registered as follows:
3084 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
3086 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
3087 /// call f(p) when DSO d is unloaded, before all such termination calls
3088 /// registered before this one. It returns zero if registration is
3089 /// successful, nonzero on failure.
3091 // This pass will look for calls to __cxa_atexit where the function is trivial
3093 bool Changed = false;
3095 for (Function::use_iterator I = CXAAtExitFn->use_begin(),
3096 E = CXAAtExitFn->use_end(); I != E;) {
3097 // We're only interested in calls. Theoretically, we could handle invoke
3098 // instructions as well, but neither llvm-gcc nor clang generate invokes
3099 // to __cxa_atexit.
3100 CallInst *CI = dyn_cast<CallInst>(*I++);
3101 if (!CI)
3102 continue;
3104 Function *DtorFn =
3105 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
3106 if (!DtorFn)
3107 continue;
3109 SmallPtrSet<const Function *, 8> CalledFunctions;
3110 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
3111 continue;
3113 // Just remove the call.
3114 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
3115 CI->eraseFromParent();
3117 ++NumCXXDtorsRemoved;
3125 bool GlobalOpt::runOnModule(Module &M) {
3126 bool Changed = false;
3128 TD = getAnalysisIfAvailable<DataLayout>();
3129 TLI = &getAnalysis<TargetLibraryInfo>();
3131 // Try to find the llvm.global_ctors list.
3132 GlobalVariable *GlobalCtors = FindGlobalCtors(M);
3134 Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
3136 bool LocalChange = true;
3137 while (LocalChange) {
3138 LocalChange = false;
3140 // Delete functions that are trivially dead, ccc -> fastcc
3141 LocalChange |= OptimizeFunctions(M);
3143 // Optimize global_ctors list.
3144 if (GlobalCtors)
3145 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);
3147 // Optimize non-address-taken globals.
3148 LocalChange |= OptimizeGlobalVars(M);
3150 // Resolve aliases, when possible.
3151 LocalChange |= OptimizeGlobalAliases(M);
3153 // Try to remove trivial global destructors.
3154 if (CXAAtExitFn)
3155 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
3157 Changed |= LocalChange;
3160 // TODO: Move all global ctors functions to the end of the module for code