1 //===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass transforms simple global variables that never have their address
11 // taken. If obviously true, it marks read/write globals as constant, deletes
12 // variables only stored to, etc.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "globalopt"
17 #include "llvm/Transforms/IPO.h"
18 #include "llvm/CallingConv.h"
19 #include "llvm/Constants.h"
20 #include "llvm/DerivedTypes.h"
21 #include "llvm/Instructions.h"
22 #include "llvm/IntrinsicInst.h"
23 #include "llvm/LLVMContext.h"
24 #include "llvm/Module.h"
25 #include "llvm/Pass.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/MallocHelper.h"
28 #include "llvm/Target/TargetData.h"
29 #include "llvm/Support/CallSite.h"
30 #include "llvm/Support/Compiler.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/GetElementPtrTypeIterator.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/ADT/DenseMap.h"
37 #include "llvm/ADT/SmallPtrSet.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/ADT/Statistic.h"
40 #include "llvm/ADT/STLExtras.h"
44 STATISTIC(NumMarked , "Number of globals marked constant");
45 STATISTIC(NumSRA , "Number of aggregate globals broken into scalars");
46 STATISTIC(NumHeapSRA , "Number of heap objects SRA'd");
47 STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
48 STATISTIC(NumDeleted , "Number of globals deleted");
49 STATISTIC(NumFnDeleted , "Number of functions deleted");
50 STATISTIC(NumGlobUses , "Number of global uses devirtualized");
51 STATISTIC(NumLocalized , "Number of globals localized");
52 STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans");
53 STATISTIC(NumFastCallFns , "Number of functions converted to fastcc");
54 STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
55 STATISTIC(NumNestRemoved , "Number of nest attributes removed");
56 STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
57 STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
60 struct VISIBILITY_HIDDEN GlobalOpt : public ModulePass {
61 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
63 static char ID; // Pass identification, replacement for typeid
64 GlobalOpt() : ModulePass(&ID) {}
66 bool runOnModule(Module &M);
69 GlobalVariable *FindGlobalCtors(Module &M);
70 bool OptimizeFunctions(Module &M);
71 bool OptimizeGlobalVars(Module &M);
72 bool OptimizeGlobalAliases(Module &M);
73 bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
74 bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI);
78 char GlobalOpt::ID = 0;
79 static RegisterPass<GlobalOpt> X("globalopt", "Global Variable Optimizer");
81 ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
85 /// GlobalStatus - As we analyze each global, keep track of some information
86 /// about it. If we find out that the address of the global is taken, none of
87 /// this info will be accurate.
88 struct VISIBILITY_HIDDEN GlobalStatus {
89 /// isLoaded - True if the global is ever loaded. If the global isn't ever
90 /// loaded it can be deleted.
93 /// StoredType - Keep track of what stores to the global look like.
96 /// NotStored - There is no store to this global. It can thus be marked
100 /// isInitializerStored - This global is stored to, but the only thing
101 /// stored is the constant it was initialized with. This is only tracked
102 /// for scalar globals.
105 /// isStoredOnce - This global is stored to, but only its initializer and
106 /// one other value is ever stored to it. If this global isStoredOnce, we
107 /// track the value stored to it in StoredOnceValue below. This is only
108 /// tracked for scalar globals.
111 /// isStored - This global is stored to by multiple values or something else
112 /// that we cannot track.
116 /// StoredOnceValue - If only one value (besides the initializer constant) is
117 /// ever stored to this global, keep track of what value it is.
118 Value *StoredOnceValue;
120 /// AccessingFunction/HasMultipleAccessingFunctions - These start out
121 /// null/false. When the first accessing function is noticed, it is recorded.
122 /// When a second different accessing function is noticed,
123 /// HasMultipleAccessingFunctions is set to true.
124 Function *AccessingFunction;
125 bool HasMultipleAccessingFunctions;
127 /// HasNonInstructionUser - Set to true if this global has a user that is not
128 /// an instruction (e.g. a constant expr or GV initializer).
129 bool HasNonInstructionUser;
131 /// HasPHIUser - Set to true if this global has a user that is a PHI node.
134 GlobalStatus() : isLoaded(false), StoredType(NotStored), StoredOnceValue(0),
135 AccessingFunction(0), HasMultipleAccessingFunctions(false),
136 HasNonInstructionUser(false), HasPHIUser(false) {}
141 // SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
142 // by constants itself. Note that constants cannot be cyclic, so this test is
143 // pretty easy to implement recursively.
145 static bool SafeToDestroyConstant(Constant *C) {
146 if (isa<GlobalValue>(C)) return false;
148 for (Value::use_iterator UI = C->use_begin(), E = C->use_end(); UI != E; ++UI)
149 if (Constant *CU = dyn_cast<Constant>(*UI)) {
150 if (!SafeToDestroyConstant(CU)) return false;
157 /// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus
158 /// structure. If the global has its address taken, return true to indicate we
159 /// can't do anything with it.
161 static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
162 SmallPtrSet<PHINode*, 16> &PHIUsers) {
163 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
164 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
165 GS.HasNonInstructionUser = true;
167 if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
169 } else if (Instruction *I = dyn_cast<Instruction>(*UI)) {
170 if (!GS.HasMultipleAccessingFunctions) {
171 Function *F = I->getParent()->getParent();
172 if (GS.AccessingFunction == 0)
173 GS.AccessingFunction = F;
174 else if (GS.AccessingFunction != F)
175 GS.HasMultipleAccessingFunctions = true;
177 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
179 if (LI->isVolatile()) return true; // Don't hack on volatile loads.
180 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
181 // Don't allow a store OF the address, only stores TO the address.
182 if (SI->getOperand(0) == V) return true;
184 if (SI->isVolatile()) return true; // Don't hack on volatile stores.
186 // If this is a direct store to the global (i.e., the global is a scalar
187 // value, not an aggregate), keep more specific information about
189 if (GS.StoredType != GlobalStatus::isStored) {
190 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(SI->getOperand(1))){
191 Value *StoredVal = SI->getOperand(0);
192 if (StoredVal == GV->getInitializer()) {
193 if (GS.StoredType < GlobalStatus::isInitializerStored)
194 GS.StoredType = GlobalStatus::isInitializerStored;
195 } else if (isa<LoadInst>(StoredVal) &&
196 cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
198 if (GS.StoredType < GlobalStatus::isInitializerStored)
199 GS.StoredType = GlobalStatus::isInitializerStored;
200 } else if (GS.StoredType < GlobalStatus::isStoredOnce) {
201 GS.StoredType = GlobalStatus::isStoredOnce;
202 GS.StoredOnceValue = StoredVal;
203 } else if (GS.StoredType == GlobalStatus::isStoredOnce &&
204 GS.StoredOnceValue == StoredVal) {
207 GS.StoredType = GlobalStatus::isStored;
210 GS.StoredType = GlobalStatus::isStored;
213 } else if (isa<GetElementPtrInst>(I)) {
214 if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
215 } else if (isa<SelectInst>(I)) {
216 if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
217 } else if (PHINode *PN = dyn_cast<PHINode>(I)) {
218 // PHI nodes we can check just like select or GEP instructions, but we
219 // have to be careful about infinite recursion.
220 if (PHIUsers.insert(PN)) // Not already visited.
221 if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
222 GS.HasPHIUser = true;
223 } else if (isa<CmpInst>(I)) {
224 } else if (isa<MemTransferInst>(I)) {
225 if (I->getOperand(1) == V)
226 GS.StoredType = GlobalStatus::isStored;
227 if (I->getOperand(2) == V)
229 } else if (isa<MemSetInst>(I)) {
230 assert(I->getOperand(1) == V && "Memset only takes one pointer!");
231 GS.StoredType = GlobalStatus::isStored;
233 return true; // Any other non-load instruction might take address!
235 } else if (Constant *C = dyn_cast<Constant>(*UI)) {
236 GS.HasNonInstructionUser = true;
237 // We might have a dead and dangling constant hanging off of here.
238 if (!SafeToDestroyConstant(C))
241 GS.HasNonInstructionUser = true;
242 // Otherwise must be some other user.
249 static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
250 LLVMContext &Context) {
251 ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
253 unsigned IdxV = CI->getZExtValue();
255 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) {
256 if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV);
257 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) {
258 if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV);
259 } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
260 if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
261 } else if (isa<ConstantAggregateZero>(Agg)) {
262 if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
263 if (IdxV < STy->getNumElements())
264 return Constant::getNullValue(STy->getElementType(IdxV));
265 } else if (const SequentialType *STy =
266 dyn_cast<SequentialType>(Agg->getType())) {
267 return Constant::getNullValue(STy->getElementType());
269 } else if (isa<UndefValue>(Agg)) {
270 if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
271 if (IdxV < STy->getNumElements())
272 return UndefValue::get(STy->getElementType(IdxV));
273 } else if (const SequentialType *STy =
274 dyn_cast<SequentialType>(Agg->getType())) {
275 return UndefValue::get(STy->getElementType());
282 /// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all
283 /// users of the global, cleaning up the obvious ones. This is largely just a
284 /// quick scan over the use list to clean up the easy and obvious cruft. This
285 /// returns true if it made a change.
286 static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
287 LLVMContext &Context) {
288 bool Changed = false;
289 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
292 if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
294 // Replace the load with the initializer.
295 LI->replaceAllUsesWith(Init);
296 LI->eraseFromParent();
299 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
300 // Store must be unreachable or storing Init into the global.
301 SI->eraseFromParent();
303 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
304 if (CE->getOpcode() == Instruction::GetElementPtr) {
305 Constant *SubInit = 0;
307 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
308 Changed |= CleanupConstantGlobalUsers(CE, SubInit, Context);
309 } else if (CE->getOpcode() == Instruction::BitCast &&
310 isa<PointerType>(CE->getType())) {
311 // Pointer cast, delete any stores and memsets to the global.
312 Changed |= CleanupConstantGlobalUsers(CE, 0, Context);
315 if (CE->use_empty()) {
316 CE->destroyConstant();
319 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
320 // Do not transform "gepinst (gep constexpr (GV))" here, because forming
321 // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
322 // and will invalidate our notion of what Init is.
323 Constant *SubInit = 0;
324 if (!isa<ConstantExpr>(GEP->getOperand(0))) {
326 dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, Context));
327 if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
328 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
330 Changed |= CleanupConstantGlobalUsers(GEP, SubInit, Context);
332 if (GEP->use_empty()) {
333 GEP->eraseFromParent();
336 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
337 if (MI->getRawDest() == V) {
338 MI->eraseFromParent();
342 } else if (Constant *C = dyn_cast<Constant>(U)) {
343 // If we have a chain of dead constantexprs or other things dangling from
344 // us, and if they are all dead, nuke them without remorse.
345 if (SafeToDestroyConstant(C)) {
346 C->destroyConstant();
347 // This could have invalidated UI, start over from scratch.
348 CleanupConstantGlobalUsers(V, Init, Context);
356 /// isSafeSROAElementUse - Return true if the specified instruction is a safe
357 /// user of a derived expression from a global that we want to SROA.
358 static bool isSafeSROAElementUse(Value *V) {
359 // We might have a dead and dangling constant hanging off of here.
360 if (Constant *C = dyn_cast<Constant>(V))
361 return SafeToDestroyConstant(C);
363 Instruction *I = dyn_cast<Instruction>(V);
364 if (!I) return false;
367 if (isa<LoadInst>(I)) return true;
369 // Stores *to* the pointer are ok.
370 if (StoreInst *SI = dyn_cast<StoreInst>(I))
371 return SI->getOperand(0) != V;
373 // Otherwise, it must be a GEP.
374 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
375 if (GEPI == 0) return false;
377 if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
378 !cast<Constant>(GEPI->getOperand(1))->isNullValue())
381 for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
383 if (!isSafeSROAElementUse(*I))
389 /// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
390 /// Look at it and its uses and decide whether it is safe to SROA this global.
392 static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
393 // The user of the global must be a GEP Inst or a ConstantExpr GEP.
394 if (!isa<GetElementPtrInst>(U) &&
395 (!isa<ConstantExpr>(U) ||
396 cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
399 // Check to see if this ConstantExpr GEP is SRA'able. In particular, we
400 // don't like < 3 operand CE's, and we don't like non-constant integer
401 // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some
403 if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
404 !cast<Constant>(U->getOperand(1))->isNullValue() ||
405 !isa<ConstantInt>(U->getOperand(2)))
408 gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
409 ++GEPI; // Skip over the pointer index.
411 // If this is a use of an array allocation, do a bit more checking for sanity.
412 if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
413 uint64_t NumElements = AT->getNumElements();
414 ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
416 // Check to make sure that index falls within the array. If not,
417 // something funny is going on, so we won't do the optimization.
419 if (Idx->getZExtValue() >= NumElements)
422 // We cannot scalar repl this level of the array unless any array
423 // sub-indices are in-range constants. In particular, consider:
424 // A[0][i]. We cannot know that the user isn't doing invalid things like
425 // allowing i to index an out-of-range subscript that accesses A[1].
427 // Scalar replacing *just* the outer index of the array is probably not
428 // going to be a win anyway, so just give up.
429 for (++GEPI; // Skip array index.
432 uint64_t NumElements;
433 if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
434 NumElements = SubArrayTy->getNumElements();
435 else if (const VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
436 NumElements = SubVectorTy->getNumElements();
438 assert(isa<StructType>(*GEPI) &&
439 "Indexed GEP type is not array, vector, or struct!");
443 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
444 if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
449 for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
450 if (!isSafeSROAElementUse(*I))
455 /// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
456 /// is safe for us to perform this transformation.
458 static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
459 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
461 if (!IsUserOfGlobalSafeForSRA(*UI, GV))
468 /// SRAGlobal - Perform scalar replacement of aggregates on the specified global
469 /// variable. This opens the door for other optimizations by exposing the
470 /// behavior of the program in a more fine-grained way. We have determined that
471 /// this transformation is safe already. We return the first global variable we
472 /// insert so that the caller can reprocess it.
473 static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
474 LLVMContext &Context) {
475 // Make sure this global only has simple uses that we can SRA.
476 if (!GlobalUsersSafeToSRA(GV))
479 assert(GV->hasLocalLinkage() && !GV->isConstant());
480 Constant *Init = GV->getInitializer();
481 const Type *Ty = Init->getType();
483 std::vector<GlobalVariable*> NewGlobals;
484 Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
486 // Get the alignment of the global, either explicit or target-specific.
487 unsigned StartAlignment = GV->getAlignment();
488 if (StartAlignment == 0)
489 StartAlignment = TD.getABITypeAlignment(GV->getType());
491 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
492 NewGlobals.reserve(STy->getNumElements());
493 const StructLayout &Layout = *TD.getStructLayout(STy);
494 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
495 Constant *In = getAggregateConstantElement(Init,
496 ConstantInt::get(Type::getInt32Ty(Context), i),
498 assert(In && "Couldn't get element of initializer?");
499 GlobalVariable *NGV = new GlobalVariable(Context,
500 STy->getElementType(i), false,
501 GlobalVariable::InternalLinkage,
502 In, GV->getName()+"."+Twine(i),
504 GV->getType()->getAddressSpace());
505 Globals.insert(GV, NGV);
506 NewGlobals.push_back(NGV);
508 // Calculate the known alignment of the field. If the original aggregate
509 // had 256 byte alignment for example, something might depend on that:
510 // propagate info to each field.
511 uint64_t FieldOffset = Layout.getElementOffset(i);
512 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
513 if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
514 NGV->setAlignment(NewAlign);
516 } else if (const SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
517 unsigned NumElements = 0;
518 if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
519 NumElements = ATy->getNumElements();
521 NumElements = cast<VectorType>(STy)->getNumElements();
523 if (NumElements > 16 && GV->hasNUsesOrMore(16))
524 return 0; // It's not worth it.
525 NewGlobals.reserve(NumElements);
527 uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
528 unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
529 for (unsigned i = 0, e = NumElements; i != e; ++i) {
530 Constant *In = getAggregateConstantElement(Init,
531 ConstantInt::get(Type::getInt32Ty(Context), i),
533 assert(In && "Couldn't get element of initializer?");
535 GlobalVariable *NGV = new GlobalVariable(Context,
536 STy->getElementType(), false,
537 GlobalVariable::InternalLinkage,
538 In, GV->getName()+"."+Twine(i),
540 GV->getType()->getAddressSpace());
541 Globals.insert(GV, NGV);
542 NewGlobals.push_back(NGV);
544 // Calculate the known alignment of the field. If the original aggregate
545 // had 256 byte alignment for example, something might depend on that:
546 // propagate info to each field.
547 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
548 if (NewAlign > EltAlign)
549 NGV->setAlignment(NewAlign);
553 if (NewGlobals.empty())
556 DEBUG(errs() << "PERFORMING GLOBAL SRA ON: " << *GV);
558 Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(Context));
560 // Loop over all of the uses of the global, replacing the constantexpr geps,
561 // with smaller constantexpr geps or direct references.
562 while (!GV->use_empty()) {
563 User *GEP = GV->use_back();
564 assert(((isa<ConstantExpr>(GEP) &&
565 cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
566 isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
568 // Ignore the 1th operand, which has to be zero or else the program is quite
569 // broken (undefined). Get the 2nd operand, which is the structure or array
571 unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
572 if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.
574 Value *NewPtr = NewGlobals[Val];
576 // Form a shorter GEP if needed.
577 if (GEP->getNumOperands() > 3) {
578 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
579 SmallVector<Constant*, 8> Idxs;
580 Idxs.push_back(NullInt);
581 for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
582 Idxs.push_back(CE->getOperand(i));
583 NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr),
584 &Idxs[0], Idxs.size());
586 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
587 SmallVector<Value*, 8> Idxs;
588 Idxs.push_back(NullInt);
589 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
590 Idxs.push_back(GEPI->getOperand(i));
591 NewPtr = GetElementPtrInst::Create(NewPtr, Idxs.begin(), Idxs.end(),
592 GEPI->getName()+"."+Twine(Val),GEPI);
595 GEP->replaceAllUsesWith(NewPtr);
597 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
598 GEPI->eraseFromParent();
600 cast<ConstantExpr>(GEP)->destroyConstant();
603 // Delete the old global, now that it is dead.
607 // Loop over the new globals array deleting any globals that are obviously
608 // dead. This can arise due to scalarization of a structure or an array that
609 // has elements that are dead.
610 unsigned FirstGlobal = 0;
611 for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
612 if (NewGlobals[i]->use_empty()) {
613 Globals.erase(NewGlobals[i]);
614 if (FirstGlobal == i) ++FirstGlobal;
617 return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
620 /// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
621 /// value will trap if the value is dynamically null. PHIs keeps track of any
622 /// phi nodes we've seen to avoid reprocessing them.
623 static bool AllUsesOfValueWillTrapIfNull(Value *V,
624 SmallPtrSet<PHINode*, 8> &PHIs) {
625 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
626 if (isa<LoadInst>(*UI)) {
628 } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
629 if (SI->getOperand(0) == V) {
630 //cerr << "NONTRAPPING USE: " << **UI;
631 return false; // Storing the value.
633 } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
634 if (CI->getOperand(0) != V) {
635 //cerr << "NONTRAPPING USE: " << **UI;
636 return false; // Not calling the ptr
638 } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
639 if (II->getOperand(0) != V) {
640 //cerr << "NONTRAPPING USE: " << **UI;
641 return false; // Not calling the ptr
643 } else if (BitCastInst *CI = dyn_cast<BitCastInst>(*UI)) {
644 if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
645 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI)) {
646 if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
647 } else if (PHINode *PN = dyn_cast<PHINode>(*UI)) {
648 // If we've already seen this phi node, ignore it, it has already been
651 return AllUsesOfValueWillTrapIfNull(PN, PHIs);
652 } else if (isa<ICmpInst>(*UI) &&
653 isa<ConstantPointerNull>(UI->getOperand(1))) {
654 // Ignore setcc X, null
656 //cerr << "NONTRAPPING USE: " << **UI;
662 /// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
663 /// from GV will trap if the loaded value is null. Note that this also permits
664 /// comparisons of the loaded value against null, as a special case.
665 static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) {
666 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI!=E; ++UI)
667 if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
668 SmallPtrSet<PHINode*, 8> PHIs;
669 if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
671 } else if (isa<StoreInst>(*UI)) {
672 // Ignore stores to the global.
674 // We don't know or understand this user, bail out.
675 //cerr << "UNKNOWN USER OF GLOBAL!: " << **UI;
682 static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
683 LLVMContext &Context) {
684 bool Changed = false;
685 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
686 Instruction *I = cast<Instruction>(*UI++);
687 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
688 LI->setOperand(0, NewV);
690 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
691 if (SI->getOperand(1) == V) {
692 SI->setOperand(1, NewV);
695 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
696 if (I->getOperand(0) == V) {
697 // Calling through the pointer! Turn into a direct call, but be careful
698 // that the pointer is not also being passed as an argument.
699 I->setOperand(0, NewV);
701 bool PassedAsArg = false;
702 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i)
703 if (I->getOperand(i) == V) {
705 I->setOperand(i, NewV);
709 // Being passed as an argument also. Be careful to not invalidate UI!
713 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
714 Changed |= OptimizeAwayTrappingUsesOfValue(CI,
715 ConstantExpr::getCast(CI->getOpcode(),
716 NewV, CI->getType()), Context);
717 if (CI->use_empty()) {
719 CI->eraseFromParent();
721 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
722 // Should handle GEP here.
723 SmallVector<Constant*, 8> Idxs;
724 Idxs.reserve(GEPI->getNumOperands()-1);
725 for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
727 if (Constant *C = dyn_cast<Constant>(*i))
731 if (Idxs.size() == GEPI->getNumOperands()-1)
732 Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
733 ConstantExpr::getGetElementPtr(NewV, &Idxs[0],
734 Idxs.size()), Context);
735 if (GEPI->use_empty()) {
737 GEPI->eraseFromParent();
746 /// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
747 /// value stored into it. If there are uses of the loaded value that would trap
748 /// if the loaded value is dynamically null, then we know that they cannot be
749 /// reachable with a null optimize away the load.
750 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
751 LLVMContext &Context) {
752 bool Changed = false;
754 // Keep track of whether we are able to remove all the uses of the global
755 // other than the store that defines it.
756 bool AllNonStoreUsesGone = true;
758 // Replace all uses of loads with uses of uses of the stored value.
759 for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
760 User *GlobalUser = *GUI++;
761 if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
762 Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV, Context);
763 // If we were able to delete all uses of the loads
764 if (LI->use_empty()) {
765 LI->eraseFromParent();
768 AllNonStoreUsesGone = false;
770 } else if (isa<StoreInst>(GlobalUser)) {
771 // Ignore the store that stores "LV" to the global.
772 assert(GlobalUser->getOperand(1) == GV &&
773 "Must be storing *to* the global");
775 AllNonStoreUsesGone = false;
777 // If we get here we could have other crazy uses that are transitively
779 assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
780 isa<ConstantExpr>(GlobalUser)) && "Only expect load and stores!");
785 DEBUG(errs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
789 // If we nuked all of the loads, then none of the stores are needed either,
790 // nor is the global.
791 if (AllNonStoreUsesGone) {
792 DEBUG(errs() << " *** GLOBAL NOW DEAD!\n");
793 CleanupConstantGlobalUsers(GV, 0, Context);
794 if (GV->use_empty()) {
795 GV->eraseFromParent();
803 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
804 /// instructions that are foldable.
805 static void ConstantPropUsersOf(Value *V, LLVMContext &Context) {
806 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
807 if (Instruction *I = dyn_cast<Instruction>(*UI++))
808 if (Constant *NewC = ConstantFoldInstruction(I, Context)) {
809 I->replaceAllUsesWith(NewC);
811 // Advance UI to the next non-I use to avoid invalidating it!
812 // Instructions could multiply use V.
813 while (UI != E && *UI == I)
815 I->eraseFromParent();
819 /// OptimizeGlobalAddressOfMalloc - This function takes the specified global
820 /// variable, and transforms the program as if it always contained the result of
821 /// the specified malloc. Because it is always the result of the specified
822 /// malloc, there is no reason to actually DO the malloc. Instead, turn the
823 /// malloc into a global, and any loads of GV as uses of the new global.
824 static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
826 LLVMContext &Context) {
827 DEBUG(errs() << "PROMOTING MALLOC GLOBAL: " << *GV << " MALLOC = " << *MI);
828 ConstantInt *NElements = cast<ConstantInt>(MI->getArraySize());
830 if (NElements->getZExtValue() != 1) {
831 // If we have an array allocation, transform it to a single element
832 // allocation to make the code below simpler.
833 Type *NewTy = ArrayType::get(MI->getAllocatedType(),
834 NElements->getZExtValue());
836 new MallocInst(NewTy, Constant::getNullValue(Type::getInt32Ty(Context)),
837 MI->getAlignment(), MI->getName(), MI);
839 Indices[0] = Indices[1] = Constant::getNullValue(Type::getInt32Ty(Context));
840 Value *NewGEP = GetElementPtrInst::Create(NewMI, Indices, Indices + 2,
841 NewMI->getName()+".el0", MI);
842 MI->replaceAllUsesWith(NewGEP);
843 MI->eraseFromParent();
847 // Create the new global variable. The contents of the malloc'd memory is
848 // undefined, so initialize with an undef value.
849 // FIXME: This new global should have the alignment returned by malloc. Code
850 // could depend on malloc returning large alignment (on the mac, 16 bytes) but
851 // this would only guarantee some lower alignment.
852 Constant *Init = UndefValue::get(MI->getAllocatedType());
853 GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
854 MI->getAllocatedType(), false,
855 GlobalValue::InternalLinkage, Init,
856 GV->getName()+".body",
858 GV->isThreadLocal());
860 // Anything that used the malloc now uses the global directly.
861 MI->replaceAllUsesWith(NewGV);
863 Constant *RepValue = NewGV;
864 if (NewGV->getType() != GV->getType()->getElementType())
865 RepValue = ConstantExpr::getBitCast(RepValue,
866 GV->getType()->getElementType());
868 // If there is a comparison against null, we will insert a global bool to
869 // keep track of whether the global was initialized yet or not.
870 GlobalVariable *InitBool =
871 new GlobalVariable(Context, Type::getInt1Ty(Context), false,
872 GlobalValue::InternalLinkage,
873 ConstantInt::getFalse(Context), GV->getName()+".init",
874 GV->isThreadLocal());
875 bool InitBoolUsed = false;
877 // Loop over all uses of GV, processing them in turn.
878 std::vector<StoreInst*> Stores;
879 while (!GV->use_empty())
880 if (LoadInst *LI = dyn_cast<LoadInst>(GV->use_back())) {
881 while (!LI->use_empty()) {
882 Use &LoadUse = LI->use_begin().getUse();
883 if (!isa<ICmpInst>(LoadUse.getUser()))
886 ICmpInst *CI = cast<ICmpInst>(LoadUse.getUser());
887 // Replace the cmp X, 0 with a use of the bool value.
888 Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", CI);
890 switch (CI->getPredicate()) {
891 default: llvm_unreachable("Unknown ICmp Predicate!");
892 case ICmpInst::ICMP_ULT:
893 case ICmpInst::ICMP_SLT:
894 LV = ConstantInt::getFalse(Context); // X < null -> always false
896 case ICmpInst::ICMP_ULE:
897 case ICmpInst::ICMP_SLE:
898 case ICmpInst::ICMP_EQ:
899 LV = BinaryOperator::CreateNot(LV, "notinit", CI);
901 case ICmpInst::ICMP_NE:
902 case ICmpInst::ICMP_UGE:
903 case ICmpInst::ICMP_SGE:
904 case ICmpInst::ICMP_UGT:
905 case ICmpInst::ICMP_SGT:
908 CI->replaceAllUsesWith(LV);
909 CI->eraseFromParent();
912 LI->eraseFromParent();
914 StoreInst *SI = cast<StoreInst>(GV->use_back());
915 // The global is initialized when the store to it occurs.
916 new StoreInst(ConstantInt::getTrue(Context), InitBool, SI);
917 SI->eraseFromParent();
920 // If the initialization boolean was used, insert it, otherwise delete it.
922 while (!InitBool->use_empty()) // Delete initializations
923 cast<Instruction>(InitBool->use_back())->eraseFromParent();
926 GV->getParent()->getGlobalList().insert(GV, InitBool);
929 // Now the GV is dead, nuke it and the malloc.
930 GV->eraseFromParent();
931 MI->eraseFromParent();
933 // To further other optimizations, loop over all users of NewGV and try to
934 // constant prop them. This will promote GEP instructions with constant
935 // indices into GEP constant-exprs, which will allow global-opt to hack on it.
936 ConstantPropUsersOf(NewGV, Context);
937 if (RepValue != NewGV)
938 ConstantPropUsersOf(RepValue, Context);
943 /// OptimizeGlobalAddressOfMalloc - This function takes the specified global
944 /// variable, and transforms the program as if it always contained the result of
945 /// the specified malloc. Because it is always the result of the specified
946 /// malloc, there is no reason to actually DO the malloc. Instead, turn the
947 /// malloc into a global, and any loads of GV as uses of the new global.
948 static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
951 LLVMContext &Context,
953 DEBUG(errs() << "PROMOTING MALLOC GLOBAL: " << *GV
954 << " CALL = " << *CI << " BCI = " << *BCI << '\n');
956 const Type *IntPtrTy = TD->getIntPtrType(Context);
958 Value* ArraySize = getMallocArraySize(CI, Context, TD);
959 assert(ArraySize && "not a malloc whose array size can be determined");
960 ConstantInt *NElements = cast<ConstantInt>(ArraySize);
961 if (NElements->getZExtValue() != 1) {
962 // If we have an array allocation, transform it to a single element
963 // allocation to make the code below simpler.
964 Type *NewTy = ArrayType::get(getMallocAllocatedType(CI),
965 NElements->getZExtValue());
966 Value* NewM = CallInst::CreateMalloc(CI, IntPtrTy, NewTy);
967 Instruction* NewMI = cast<Instruction>(NewM);
969 Indices[0] = Indices[1] = Constant::getNullValue(IntPtrTy);
970 Value *NewGEP = GetElementPtrInst::Create(NewMI, Indices, Indices + 2,
971 NewMI->getName()+".el0", CI);
972 BCI->replaceAllUsesWith(NewGEP);
973 BCI->eraseFromParent();
974 CI->eraseFromParent();
975 BCI = cast<BitCastInst>(NewMI);
976 CI = extractMallocCallFromBitCast(NewMI);
979 // Create the new global variable. The contents of the malloc'd memory is
980 // undefined, so initialize with an undef value.
981 const Type *MAT = getMallocAllocatedType(CI);
982 Constant *Init = UndefValue::get(MAT);
983 GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
985 GlobalValue::InternalLinkage, Init,
986 GV->getName()+".body",
988 GV->isThreadLocal());
990 // Anything that used the malloc now uses the global directly.
991 BCI->replaceAllUsesWith(NewGV);
993 Constant *RepValue = NewGV;
994 if (NewGV->getType() != GV->getType()->getElementType())
995 RepValue = ConstantExpr::getBitCast(RepValue,
996 GV->getType()->getElementType());
998 // If there is a comparison against null, we will insert a global bool to
999 // keep track of whether the global was initialized yet or not.
1000 GlobalVariable *InitBool =
1001 new GlobalVariable(Context, Type::getInt1Ty(Context), false,
1002 GlobalValue::InternalLinkage,
1003 ConstantInt::getFalse(Context), GV->getName()+".init",
1004 GV->isThreadLocal());
1005 bool InitBoolUsed = false;
1007 // Loop over all uses of GV, processing them in turn.
1008 std::vector<StoreInst*> Stores;
1009 while (!GV->use_empty())
1010 if (LoadInst *LI = dyn_cast<LoadInst>(GV->use_back())) {
1011 while (!LI->use_empty()) {
1012 Use &LoadUse = LI->use_begin().getUse();
1013 if (!isa<ICmpInst>(LoadUse.getUser()))
1016 ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
1017 // Replace the cmp X, 0 with a use of the bool value.
1018 Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI);
1019 InitBoolUsed = true;
1020 switch (ICI->getPredicate()) {
1021 default: llvm_unreachable("Unknown ICmp Predicate!");
1022 case ICmpInst::ICMP_ULT:
1023 case ICmpInst::ICMP_SLT:
1024 LV = ConstantInt::getFalse(Context); // X < null -> always false
1026 case ICmpInst::ICMP_ULE:
1027 case ICmpInst::ICMP_SLE:
1028 case ICmpInst::ICMP_EQ:
1029 LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
1031 case ICmpInst::ICMP_NE:
1032 case ICmpInst::ICMP_UGE:
1033 case ICmpInst::ICMP_SGE:
1034 case ICmpInst::ICMP_UGT:
1035 case ICmpInst::ICMP_SGT:
1036 break; // no change.
1038 ICI->replaceAllUsesWith(LV);
1039 ICI->eraseFromParent();
1042 LI->eraseFromParent();
1044 StoreInst *SI = cast<StoreInst>(GV->use_back());
1045 // The global is initialized when the store to it occurs.
1046 new StoreInst(ConstantInt::getTrue(Context), InitBool, SI);
1047 SI->eraseFromParent();
1050 // If the initialization boolean was used, insert it, otherwise delete it.
1051 if (!InitBoolUsed) {
1052 while (!InitBool->use_empty()) // Delete initializations
1053 cast<Instruction>(InitBool->use_back())->eraseFromParent();
1056 GV->getParent()->getGlobalList().insert(GV, InitBool);
1059 // Now the GV is dead, nuke it and the malloc.
1060 GV->eraseFromParent();
1061 BCI->eraseFromParent();
1062 CI->eraseFromParent();
1064 // To further other optimizations, loop over all users of NewGV and try to
1065 // constant prop them. This will promote GEP instructions with constant
1066 // indices into GEP constant-exprs, which will allow global-opt to hack on it.
1067 ConstantPropUsersOf(NewGV, Context);
1068 if (RepValue != NewGV)
1069 ConstantPropUsersOf(RepValue, Context);
1074 /// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
1075 /// to make sure that there are no complex uses of V. We permit simple things
1076 /// like dereferencing the pointer, but not storing through the address, unless
1077 /// it is to the specified global.
1078 static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Instruction *V,
1080 SmallPtrSet<PHINode*, 8> &PHIs) {
1081 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
1082 Instruction *Inst = cast<Instruction>(*UI);
1084 if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
1085 continue; // Fine, ignore.
1088 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1089 if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
1090 return false; // Storing the pointer itself... bad.
1091 continue; // Otherwise, storing through it, or storing into GV... fine.
1094 if (isa<GetElementPtrInst>(Inst)) {
1095 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
1100 if (PHINode *PN = dyn_cast<PHINode>(Inst)) {
1101 // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
1103 if (PHIs.insert(PN))
1104 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
1109 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
1110 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
1120 /// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
1121 /// somewhere. Transform all uses of the allocation into loads from the
1122 /// global and uses of the resultant pointer. Further, delete the store into
1123 /// GV. This assumes that these value pass the
1124 /// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
1125 static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
1126 GlobalVariable *GV) {
1127 while (!Alloc->use_empty()) {
1128 Instruction *U = cast<Instruction>(*Alloc->use_begin());
1129 Instruction *InsertPt = U;
1130 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1131 // If this is the store of the allocation into the global, remove it.
1132 if (SI->getOperand(1) == GV) {
1133 SI->eraseFromParent();
1136 } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
1137 // Insert the load in the corresponding predecessor, not right before the
1139 InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
1140 } else if (isa<BitCastInst>(U)) {
1141 // Must be bitcast between the malloc and store to initialize the global.
1142 ReplaceUsesOfMallocWithGlobal(U, GV);
1143 U->eraseFromParent();
1145 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
1146 // If this is a "GEP bitcast" and the user is a store to the global, then
1147 // just process it as a bitcast.
1148 if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
1149 if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
1150 if (SI->getOperand(1) == GV) {
1151 // Must be bitcast GEP between the malloc and store to initialize
1153 ReplaceUsesOfMallocWithGlobal(GEPI, GV);
1154 GEPI->eraseFromParent();
1159 // Insert a load from the global, and use it instead of the malloc.
1160 Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
1161 U->replaceUsesOfWith(Alloc, NL);
1165 /// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
1166 /// of a load) are simple enough to perform heap SRA on. This permits GEP's
1167 /// that index through the array and struct field, icmps of null, and PHIs.
1168 static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
1169 SmallPtrSet<PHINode*, 32> &LoadUsingPHIs,
1170 SmallPtrSet<PHINode*, 32> &LoadUsingPHIsPerLoad) {
1171 // We permit two users of the load: setcc comparing against the null
1172 // pointer, and a getelementptr of a specific form.
1173 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
1174 Instruction *User = cast<Instruction>(*UI);
1176 // Comparison against null is ok.
1177 if (ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
1178 if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
1183 // getelementptr is also ok, but only a simple form.
1184 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
1185 // Must index into the array and into the struct.
1186 if (GEPI->getNumOperands() < 3)
1189 // Otherwise the GEP is ok.
1193 if (PHINode *PN = dyn_cast<PHINode>(User)) {
1194 if (!LoadUsingPHIsPerLoad.insert(PN))
1195 // This means some phi nodes are dependent on each other.
1196 // Avoid infinite looping!
1198 if (!LoadUsingPHIs.insert(PN))
1199 // If we have already analyzed this PHI, then it is safe.
1202 // Make sure all uses of the PHI are simple enough to transform.
1203 if (!LoadUsesSimpleEnoughForHeapSRA(PN,
1204 LoadUsingPHIs, LoadUsingPHIsPerLoad))
1210 // Otherwise we don't know what this is, not ok.
1218 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
1219 /// GV are simple enough to perform HeapSRA, return true.
1220 static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
1221 Instruction *StoredVal) {
1222 SmallPtrSet<PHINode*, 32> LoadUsingPHIs;
1223 SmallPtrSet<PHINode*, 32> LoadUsingPHIsPerLoad;
1224 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;
1226 if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
1227 if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
1228 LoadUsingPHIsPerLoad))
1230 LoadUsingPHIsPerLoad.clear();
1233 // If we reach here, we know that all uses of the loads and transitive uses
1234 // (through PHI nodes) are simple enough to transform. However, we don't know
1235 // that all inputs the to the PHI nodes are in the same equivalence sets.
1236 // Check to verify that all operands of the PHIs are either PHIS that can be
1237 // transformed, loads from GV, or MI itself.
1238 for (SmallPtrSet<PHINode*, 32>::iterator I = LoadUsingPHIs.begin(),
1239 E = LoadUsingPHIs.end(); I != E; ++I) {
1241 for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
1242 Value *InVal = PN->getIncomingValue(op);
1244 // PHI of the stored value itself is ok.
1245 if (InVal == StoredVal) continue;
1247 if (PHINode *InPN = dyn_cast<PHINode>(InVal)) {
1248 // One of the PHIs in our set is (optimistically) ok.
1249 if (LoadUsingPHIs.count(InPN))
1254 // Load from GV is ok.
1255 if (LoadInst *LI = dyn_cast<LoadInst>(InVal))
1256 if (LI->getOperand(0) == GV)
1261 // Anything else is rejected.
1269 static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
1270 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
1271 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
1272 LLVMContext &Context) {
1273 std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];
1275 if (FieldNo >= FieldVals.size())
1276 FieldVals.resize(FieldNo+1);
1278 // If we already have this value, just reuse the previously scalarized
1280 if (Value *FieldVal = FieldVals[FieldNo])
1283 // Depending on what instruction this is, we have several cases.
1285 if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
1286 // This is a scalarized version of the load from the global. Just create
1287 // a new Load of the scalarized global.
1288 Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
1289 InsertedScalarizedValues,
1290 PHIsToRewrite, Context),
1291 LI->getName()+".f"+Twine(FieldNo), LI);
1292 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
1293 // PN's type is pointer to struct. Make a new PHI of pointer to struct
1295 const StructType *ST =
1296 cast<StructType>(cast<PointerType>(PN->getType())->getElementType());
1299 PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
1300 PN->getName()+".f"+Twine(FieldNo), PN);
1301 PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
1303 llvm_unreachable("Unknown usable value");
1307 return FieldVals[FieldNo] = Result;
1310 /// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
1311 /// the load, rewrite the derived value to use the HeapSRoA'd load.
1312 static void RewriteHeapSROALoadUser(Instruction *LoadUser,
1313 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
1314 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
1315 LLVMContext &Context) {
1316 // If this is a comparison against null, handle it.
1317 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
1318 assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
1319 // If we have a setcc of the loaded pointer, we can use a setcc of any
1321 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
1322 InsertedScalarizedValues, PHIsToRewrite,
1325 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
1326 Constant::getNullValue(NPtr->getType()),
1328 SCI->replaceAllUsesWith(New);
1329 SCI->eraseFromParent();
1333 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
1334 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
1335 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
1336 && "Unexpected GEPI!");
1338 // Load the pointer for this field.
1339 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
1340 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
1341 InsertedScalarizedValues, PHIsToRewrite,
1344 // Create the new GEP idx vector.
1345 SmallVector<Value*, 8> GEPIdx;
1346 GEPIdx.push_back(GEPI->getOperand(1));
1347 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());
1349 Value *NGEPI = GetElementPtrInst::Create(NewPtr,
1350 GEPIdx.begin(), GEPIdx.end(),
1351 GEPI->getName(), GEPI);
1352 GEPI->replaceAllUsesWith(NGEPI);
1353 GEPI->eraseFromParent();
1357 // Recursively transform the users of PHI nodes. This will lazily create the
1358 // PHIs that are needed for individual elements. Keep track of what PHIs we
1359 // see in InsertedScalarizedValues so that we don't get infinite loops (very
1360 // antisocial). If the PHI is already in InsertedScalarizedValues, it has
1361 // already been seen first by another load, so its uses have already been
1363 PHINode *PN = cast<PHINode>(LoadUser);
1365 DenseMap<Value*, std::vector<Value*> >::iterator InsertPos;
1366 tie(InsertPos, Inserted) =
1367 InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>()));
1368 if (!Inserted) return;
1370 // If this is the first time we've seen this PHI, recursively process all
1372 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
1373 Instruction *User = cast<Instruction>(*UI++);
1374 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite,
1379 /// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr
1380 /// is a value loaded from the global. Eliminate all uses of Ptr, making them
1381 /// use FieldGlobals instead. All uses of loaded values satisfy
1382 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
1383 static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
1384 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
1385 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
1386 LLVMContext &Context) {
1387 for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
1389 Instruction *User = cast<Instruction>(*UI++);
1390 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite,
1394 if (Load->use_empty()) {
1395 Load->eraseFromParent();
1396 InsertedScalarizedValues.erase(Load);
1400 /// PerformHeapAllocSRoA - MI is an allocation of an array of structures. Break
1401 /// it up into multiple allocations of arrays of the fields.
1402 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
1403 LLVMContext &Context){
1404 DEBUG(errs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *MI);
1405 const StructType *STy = cast<StructType>(MI->getAllocatedType());
1407 // There is guaranteed to be at least one use of the malloc (storing
1408 // it into GV). If there are other uses, change them to be uses of
1409 // the global to simplify later code. This also deletes the store
1411 ReplaceUsesOfMallocWithGlobal(MI, GV);
1413 // Okay, at this point, there are no users of the malloc. Insert N
1414 // new mallocs at the same place as MI, and N globals.
1415 std::vector<Value*> FieldGlobals;
1416 std::vector<MallocInst*> FieldMallocs;
1418 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
1419 const Type *FieldTy = STy->getElementType(FieldNo);
1420 const Type *PFieldTy = PointerType::getUnqual(FieldTy);
1422 GlobalVariable *NGV =
1423 new GlobalVariable(*GV->getParent(),
1424 PFieldTy, false, GlobalValue::InternalLinkage,
1425 Constant::getNullValue(PFieldTy),
1426 GV->getName() + ".f" + Twine(FieldNo), GV,
1427 GV->isThreadLocal());
1428 FieldGlobals.push_back(NGV);
1430 MallocInst *NMI = new MallocInst(FieldTy, MI->getArraySize(),
1431 MI->getName() + ".f" + Twine(FieldNo), MI);
1432 FieldMallocs.push_back(NMI);
1433 new StoreInst(NMI, NGV, MI);
1436 // The tricky aspect of this transformation is handling the case when malloc
1437 // fails. In the original code, malloc failing would set the result pointer
1438 // of malloc to null. In this case, some mallocs could succeed and others
1439 // could fail. As such, we emit code that looks like this:
1440 // F0 = malloc(field0)
1441 // F1 = malloc(field1)
1442 // F2 = malloc(field2)
1443 // if (F0 == 0 || F1 == 0 || F2 == 0) {
1444 // if (F0) { free(F0); F0 = 0; }
1445 // if (F1) { free(F1); F1 = 0; }
1446 // if (F2) { free(F2); F2 = 0; }
1448 Value *RunningOr = 0;
1449 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
1450 Value *Cond = new ICmpInst(MI, ICmpInst::ICMP_EQ, FieldMallocs[i],
1451 Constant::getNullValue(FieldMallocs[i]->getType()),
1454 RunningOr = Cond; // First seteq
1456 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", MI);
1459 // Split the basic block at the old malloc.
1460 BasicBlock *OrigBB = MI->getParent();
1461 BasicBlock *ContBB = OrigBB->splitBasicBlock(MI, "malloc_cont");
1463 // Create the block to check the first condition. Put all these blocks at the
1464 // end of the function as they are unlikely to be executed.
1465 BasicBlock *NullPtrBlock = BasicBlock::Create(Context, "malloc_ret_null",
1466 OrigBB->getParent());
1468 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
1469 // branch on RunningOr.
1470 OrigBB->getTerminator()->eraseFromParent();
1471 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);
1473 // Within the NullPtrBlock, we need to emit a comparison and branch for each
1474 // pointer, because some may be null while others are not.
1475 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1476 Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
1477 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
1478 Constant::getNullValue(GVVal->getType()),
1480 BasicBlock *FreeBlock = BasicBlock::Create(Context, "free_it",
1481 OrigBB->getParent());
1482 BasicBlock *NextBlock = BasicBlock::Create(Context, "next",
1483 OrigBB->getParent());
1484 BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock);
1486 // Fill in FreeBlock.
1487 new FreeInst(GVVal, FreeBlock);
1488 new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
1490 BranchInst::Create(NextBlock, FreeBlock);
1492 NullPtrBlock = NextBlock;
1495 BranchInst::Create(ContBB, NullPtrBlock);
1497 // MI is no longer needed, remove it.
1498 MI->eraseFromParent();
1500 /// InsertedScalarizedLoads - As we process loads, if we can't immediately
1501 /// update all uses of the load, keep track of what scalarized loads are
1502 /// inserted for a given load.
1503 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
1504 InsertedScalarizedValues[GV] = FieldGlobals;
1506 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;
1508 // Okay, the malloc site is completely handled. All of the uses of GV are now
1509 // loads, and all uses of those loads are simple. Rewrite them to use loads
1510 // of the per-field globals instead.
1511 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
1512 Instruction *User = cast<Instruction>(*UI++);
1514 if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1515 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite,
1520 // Must be a store of null.
1521 StoreInst *SI = cast<StoreInst>(User);
1522 assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
1523 "Unexpected heap-sra user!");
1525 // Insert a store of null into each global.
1526 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1527 const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
1528 Constant *Null = Constant::getNullValue(PT->getElementType());
1529 new StoreInst(Null, FieldGlobals[i], SI);
1531 // Erase the original store.
1532 SI->eraseFromParent();
1535 // While we have PHIs that are interesting to rewrite, do it.
1536 while (!PHIsToRewrite.empty()) {
1537 PHINode *PN = PHIsToRewrite.back().first;
1538 unsigned FieldNo = PHIsToRewrite.back().second;
1539 PHIsToRewrite.pop_back();
1540 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
1541 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");
1543 // Add all the incoming values. This can materialize more phis.
1544 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1545 Value *InVal = PN->getIncomingValue(i);
1546 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
1547 PHIsToRewrite, Context);
1548 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
1552 // Drop all inter-phi links and any loads that made it this far.
1553 for (DenseMap<Value*, std::vector<Value*> >::iterator
1554 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1556 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1557 PN->dropAllReferences();
1558 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1559 LI->dropAllReferences();
1562 // Delete all the phis and loads now that inter-references are dead.
1563 for (DenseMap<Value*, std::vector<Value*> >::iterator
1564 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1566 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1567 PN->eraseFromParent();
1568 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1569 LI->eraseFromParent();
1572 // The old global is now dead, remove it.
1573 GV->eraseFromParent();
1576 return cast<GlobalVariable>(FieldGlobals[0]);
1579 /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
1580 /// it up into multiple allocations of arrays of the fields.
1581 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
1582 CallInst *CI, BitCastInst* BCI,
1583 LLVMContext &Context,
1585 DEBUG(errs() << "SROA HEAP ALLOC: " << *GV << " MALLOC CALL = " << *CI
1586 << " BITCAST = " << *BCI << '\n');
1587 const Type* MAT = getMallocAllocatedType(CI);
1588 const StructType *STy = cast<StructType>(MAT);
1589 Value* ArraySize = getMallocArraySize(CI, Context, TD);
1590 assert(ArraySize && "not a malloc whose array size can be determined");
1592 // There is guaranteed to be at least one use of the malloc (storing
1593 // it into GV). If there are other uses, change them to be uses of
1594 // the global to simplify later code. This also deletes the store
1596 ReplaceUsesOfMallocWithGlobal(BCI, GV);
1598 // Okay, at this point, there are no users of the malloc. Insert N
1599 // new mallocs at the same place as CI, and N globals.
1600 std::vector<Value*> FieldGlobals;
1601 std::vector<Value*> FieldMallocs;
1603 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
1604 const Type *FieldTy = STy->getElementType(FieldNo);
1605 const PointerType *PFieldTy = PointerType::getUnqual(FieldTy);
1607 GlobalVariable *NGV =
1608 new GlobalVariable(*GV->getParent(),
1609 PFieldTy, false, GlobalValue::InternalLinkage,
1610 Constant::getNullValue(PFieldTy),
1611 GV->getName() + ".f" + Twine(FieldNo), GV,
1612 GV->isThreadLocal());
1613 FieldGlobals.push_back(NGV);
1615 Value *NMI = CallInst::CreateMalloc(CI, TD->getIntPtrType(Context),
1617 BCI->getName() + ".f" + Twine(FieldNo));
1618 FieldMallocs.push_back(NMI);
1619 new StoreInst(NMI, NGV, BCI);
1622 // The tricky aspect of this transformation is handling the case when malloc
1623 // fails. In the original code, malloc failing would set the result pointer
1624 // of malloc to null. In this case, some mallocs could succeed and others
1625 // could fail. As such, we emit code that looks like this:
1626 // F0 = malloc(field0)
1627 // F1 = malloc(field1)
1628 // F2 = malloc(field2)
1629 // if (F0 == 0 || F1 == 0 || F2 == 0) {
1630 // if (F0) { free(F0); F0 = 0; }
1631 // if (F1) { free(F1); F1 = 0; }
1632 // if (F2) { free(F2); F2 = 0; }
1634 Value *RunningOr = 0;
1635 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
1636 Value *Cond = new ICmpInst(BCI, ICmpInst::ICMP_EQ, FieldMallocs[i],
1637 Constant::getNullValue(FieldMallocs[i]->getType()),
1640 RunningOr = Cond; // First seteq
1642 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", BCI);
1645 // Split the basic block at the old malloc.
1646 BasicBlock *OrigBB = BCI->getParent();
1647 BasicBlock *ContBB = OrigBB->splitBasicBlock(BCI, "malloc_cont");
1649 // Create the block to check the first condition. Put all these blocks at the
1650 // end of the function as they are unlikely to be executed.
1651 BasicBlock *NullPtrBlock = BasicBlock::Create(Context, "malloc_ret_null",
1652 OrigBB->getParent());
1654 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
1655 // branch on RunningOr.
1656 OrigBB->getTerminator()->eraseFromParent();
1657 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);
1659 // Within the NullPtrBlock, we need to emit a comparison and branch for each
1660 // pointer, because some may be null while others are not.
1661 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1662 Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
1663 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
1664 Constant::getNullValue(GVVal->getType()),
1666 BasicBlock *FreeBlock = BasicBlock::Create(Context, "free_it",
1667 OrigBB->getParent());
1668 BasicBlock *NextBlock = BasicBlock::Create(Context, "next",
1669 OrigBB->getParent());
1670 BranchInst::Create(FreeBlock, NextBlock, Cmp, NullPtrBlock);
1672 // Fill in FreeBlock.
1673 new FreeInst(GVVal, FreeBlock);
1674 new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
1676 BranchInst::Create(NextBlock, FreeBlock);
1678 NullPtrBlock = NextBlock;
1681 BranchInst::Create(ContBB, NullPtrBlock);
1683 // CI and BCI are no longer needed, remove them.
1684 BCI->eraseFromParent();
1685 CI->eraseFromParent();
1687 /// InsertedScalarizedLoads - As we process loads, if we can't immediately
1688 /// update all uses of the load, keep track of what scalarized loads are
1689 /// inserted for a given load.
1690 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
1691 InsertedScalarizedValues[GV] = FieldGlobals;
1693 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;
1695 // Okay, the malloc site is completely handled. All of the uses of GV are now
1696 // loads, and all uses of those loads are simple. Rewrite them to use loads
1697 // of the per-field globals instead.
1698 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
1699 Instruction *User = cast<Instruction>(*UI++);
1701 if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1702 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite,
1707 // Must be a store of null.
1708 StoreInst *SI = cast<StoreInst>(User);
1709 assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
1710 "Unexpected heap-sra user!");
1712 // Insert a store of null into each global.
1713 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1714 const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
1715 Constant *Null = Constant::getNullValue(PT->getElementType());
1716 new StoreInst(Null, FieldGlobals[i], SI);
1718 // Erase the original store.
1719 SI->eraseFromParent();
1722 // While we have PHIs that are interesting to rewrite, do it.
1723 while (!PHIsToRewrite.empty()) {
1724 PHINode *PN = PHIsToRewrite.back().first;
1725 unsigned FieldNo = PHIsToRewrite.back().second;
1726 PHIsToRewrite.pop_back();
1727 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
1728 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");
1730 // Add all the incoming values. This can materialize more phis.
1731 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1732 Value *InVal = PN->getIncomingValue(i);
1733 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
1734 PHIsToRewrite, Context);
1735 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
1739 // Drop all inter-phi links and any loads that made it this far.
1740 for (DenseMap<Value*, std::vector<Value*> >::iterator
1741 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1743 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1744 PN->dropAllReferences();
1745 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1746 LI->dropAllReferences();
1749 // Delete all the phis and loads now that inter-references are dead.
1750 for (DenseMap<Value*, std::vector<Value*> >::iterator
1751 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1753 if (PHINode *PN = dyn_cast<PHINode>(I->first))
1754 PN->eraseFromParent();
1755 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1756 LI->eraseFromParent();
1759 // The old global is now dead, remove it.
1760 GV->eraseFromParent();
1763 return cast<GlobalVariable>(FieldGlobals[0]);
1766 /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
1767 /// pointer global variable with a single value stored it that is a malloc or
1769 static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
1771 Module::global_iterator &GVI,
1773 LLVMContext &Context) {
1774 // If this is a malloc of an abstract type, don't touch it.
1775 if (!MI->getAllocatedType()->isSized())
1778 // We can't optimize this global unless all uses of it are *known* to be
1779 // of the malloc value, not of the null initializer value (consider a use
1780 // that compares the global's value against zero to see if the malloc has
1781 // been reached). To do this, we check to see if all uses of the global
1782 // would trap if the global were null: this proves that they must all
1783 // happen after the malloc.
1784 if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
1787 // We can't optimize this if the malloc itself is used in a complex way,
1788 // for example, being stored into multiple globals. This allows the
1789 // malloc to be stored into the specified global, loaded setcc'd, and
1790 // GEP'd. These are all things we could transform to using the global
1793 SmallPtrSet<PHINode*, 8> PHIs;
1794 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(MI, GV, PHIs))
1799 // If we have a global that is only initialized with a fixed size malloc,
1800 // transform the program to use global memory instead of malloc'd memory.
1801 // This eliminates dynamic allocation, avoids an indirection accessing the
1802 // data, and exposes the resultant global to further GlobalOpt.
1803 if (ConstantInt *NElements = dyn_cast<ConstantInt>(MI->getArraySize())) {
1804 // Restrict this transformation to only working on small allocations
1805 // (2048 bytes currently), as we don't want to introduce a 16M global or
1808 NElements->getZExtValue()*
1809 TD->getTypeAllocSize(MI->getAllocatedType()) < 2048) {
1810 GVI = OptimizeGlobalAddressOfMalloc(GV, MI, Context);
1815 // If the allocation is an array of structures, consider transforming this
1816 // into multiple malloc'd arrays, one for each field. This is basically
1817 // SRoA for malloc'd memory.
1818 const Type *AllocTy = MI->getAllocatedType();
1820 // If this is an allocation of a fixed size array of structs, analyze as a
1821 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1822 if (!MI->isArrayAllocation())
1823 if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1824 AllocTy = AT->getElementType();
1826 if (const StructType *AllocSTy = dyn_cast<StructType>(AllocTy)) {
1827 // This the structure has an unreasonable number of fields, leave it
1829 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1830 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, MI)) {
1832 // If this is a fixed size array, transform the Malloc to be an alloc of
1833 // structs. malloc [100 x struct],1 -> malloc struct, 100
1834 if (const ArrayType *AT = dyn_cast<ArrayType>(MI->getAllocatedType())) {
1836 new MallocInst(AllocSTy,
1837 ConstantInt::get(Type::getInt32Ty(Context),
1838 AT->getNumElements()),
1840 NewMI->takeName(MI);
1841 Value *Cast = new BitCastInst(NewMI, MI->getType(), "tmp", MI);
1842 MI->replaceAllUsesWith(Cast);
1843 MI->eraseFromParent();
1847 GVI = PerformHeapAllocSRoA(GV, MI, Context);
1855 /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
1856 /// pointer global variable with a single value stored it that is a malloc or
1858 static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
1861 Module::global_iterator &GVI,
1863 LLVMContext &Context) {
1864 // If we can't figure out the type being malloced, then we can't optimize.
1865 const Type *AllocTy = getMallocAllocatedType(CI);
1868 // If this is a malloc of an abstract type, don't touch it.
1869 if (!AllocTy->isSized())
1872 // We can't optimize this global unless all uses of it are *known* to be
1873 // of the malloc value, not of the null initializer value (consider a use
1874 // that compares the global's value against zero to see if the malloc has
1875 // been reached). To do this, we check to see if all uses of the global
1876 // would trap if the global were null: this proves that they must all
1877 // happen after the malloc.
1878 if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
1881 // We can't optimize this if the malloc itself is used in a complex way,
1882 // for example, being stored into multiple globals. This allows the
1883 // malloc to be stored into the specified global, loaded setcc'd, and
1884 // GEP'd. These are all things we could transform to using the global
1887 SmallPtrSet<PHINode*, 8> PHIs;
1888 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
1892 // If we have a global that is only initialized with a fixed size malloc,
1893 // transform the program to use global memory instead of malloc'd memory.
1894 // This eliminates dynamic allocation, avoids an indirection accessing the
1895 // data, and exposes the resultant global to further GlobalOpt.
1896 Value *NElems = getMallocArraySize(CI, Context, TD);
1897 // We cannot optimize the malloc if we cannot determine malloc array size.
1899 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1900 // Restrict this transformation to only working on small allocations
1901 // (2048 bytes currently), as we don't want to introduce a 16M global or
1904 NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
1905 GVI = OptimizeGlobalAddressOfMalloc(GV, CI, BCI, Context, TD);
1909 // If the allocation is an array of structures, consider transforming this
1910 // into multiple malloc'd arrays, one for each field. This is basically
1911 // SRoA for malloc'd memory.
1913 // If this is an allocation of a fixed size array of structs, analyze as a
1914 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1915 if (!isArrayMalloc(CI, Context, TD))
1916 if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1917 AllocTy = AT->getElementType();
1919 if (const StructType *AllocSTy = dyn_cast<StructType>(AllocTy)) {
1920 // This the structure has an unreasonable number of fields, leave it
1922 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1923 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, BCI)) {
1925 // If this is a fixed size array, transform the Malloc to be an alloc of
1926 // structs. malloc [100 x struct],1 -> malloc struct, 100
1927 if (const ArrayType *AT =
1928 dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
1929 Value* NumElements = ConstantInt::get(Type::getInt32Ty(Context),
1930 AT->getNumElements());
1931 Value* NewMI = CallInst::CreateMalloc(CI, TD->getIntPtrType(Context),
1932 AllocSTy, NumElements,
1934 Value *Cast = new BitCastInst(NewMI, getMallocType(CI), "tmp", CI);
1935 BCI->replaceAllUsesWith(Cast);
1936 BCI->eraseFromParent();
1937 CI->eraseFromParent();
1938 BCI = cast<BitCastInst>(NewMI);
1939 CI = extractMallocCallFromBitCast(NewMI);
1942 GVI = PerformHeapAllocSRoA(GV, CI, BCI, Context, TD);
1951 // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
1952 // that only one value (besides its initializer) is ever stored to the global.
1953 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1954 Module::global_iterator &GVI,
1955 TargetData *TD, LLVMContext &Context) {
1956 // Ignore no-op GEPs and bitcasts.
1957 StoredOnceVal = StoredOnceVal->stripPointerCasts();
1959 // If we are dealing with a pointer global that is initialized to null and
1960 // only has one (non-null) value stored into it, then we can optimize any
1961 // users of the loaded value (often calls and loads) that would trap if the
1963 if (isa<PointerType>(GV->getInitializer()->getType()) &&
1964 GV->getInitializer()->isNullValue()) {
1965 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1966 if (GV->getInitializer()->getType() != SOVC->getType())
1968 ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1970 // Optimize away any trapping uses of the loaded value.
1971 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, Context))
1973 } else if (MallocInst *MI = dyn_cast<MallocInst>(StoredOnceVal)) {
1974 if (TryToOptimizeStoreOfMallocToGlobal(GV, MI, GVI, TD, Context))
1976 } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
1977 if (getMallocAllocatedType(CI)) {
1978 BitCastInst* BCI = NULL;
1979 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
1981 BCI = dyn_cast<BitCastInst>(cast<Instruction>(*UI++));
1983 TryToOptimizeStoreOfMallocToGlobal(GV, CI, BCI, GVI, TD, Context))
1992 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
1993 /// two values ever stored into GV are its initializer and OtherVal. See if we
1994 /// can shrink the global into a boolean and select between the two values
1995 /// whenever it is used. This exposes the values to other scalar optimizations.
1996 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
1997 LLVMContext &Context) {
1998 const Type *GVElType = GV->getType()->getElementType();
2000 // If GVElType is already i1, it is already shrunk. If the type of the GV is
2001 // an FP value, pointer or vector, don't do this optimization because a select
2002 // between them is very expensive and unlikely to lead to later
2003 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
2004 // where v1 and v2 both require constant pool loads, a big loss.
2005 if (GVElType == Type::getInt1Ty(Context) || GVElType->isFloatingPoint() ||
2006 isa<PointerType>(GVElType) || isa<VectorType>(GVElType))
2009 // Walk the use list of the global seeing if all the uses are load or store.
2010 // If there is anything else, bail out.
2011 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I)
2012 if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
2015 DEBUG(errs() << " *** SHRINKING TO BOOL: " << *GV);
2017 // Create the new global, initializing it to false.
2018 GlobalVariable *NewGV = new GlobalVariable(Context,
2019 Type::getInt1Ty(Context), false,
2020 GlobalValue::InternalLinkage, ConstantInt::getFalse(Context),
2022 GV->isThreadLocal());
2023 GV->getParent()->getGlobalList().insert(GV, NewGV);
2025 Constant *InitVal = GV->getInitializer();
2026 assert(InitVal->getType() != Type::getInt1Ty(Context) &&
2027 "No reason to shrink to bool!");
2029 // If initialized to zero and storing one into the global, we can use a cast
2030 // instead of a select to synthesize the desired value.
2031 bool IsOneZero = false;
2032 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
2033 IsOneZero = InitVal->isNullValue() && CI->isOne();
2035 while (!GV->use_empty()) {
2036 Instruction *UI = cast<Instruction>(GV->use_back());
2037 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
2038 // Change the store into a boolean store.
2039 bool StoringOther = SI->getOperand(0) == OtherVal;
2040 // Only do this if we weren't storing a loaded value.
2042 if (StoringOther || SI->getOperand(0) == InitVal)
2043 StoreVal = ConstantInt::get(Type::getInt1Ty(Context), StoringOther);
2045 // Otherwise, we are storing a previously loaded copy. To do this,
2046 // change the copy from copying the original value to just copying the
2048 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
2050 // If we're already replaced the input, StoredVal will be a cast or
2051 // select instruction. If not, it will be a load of the original
2053 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
2054 assert(LI->getOperand(0) == GV && "Not a copy!");
2055 // Insert a new load, to preserve the saved value.
2056 StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI);
2058 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
2059 "This is not a form that we understand!");
2060 StoreVal = StoredVal->getOperand(0);
2061 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
2064 new StoreInst(StoreVal, NewGV, SI);
2066 // Change the load into a load of bool then a select.
2067 LoadInst *LI = cast<LoadInst>(UI);
2068 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
2071 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
2073 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
2075 LI->replaceAllUsesWith(NSI);
2077 UI->eraseFromParent();
2080 GV->eraseFromParent();
2085 /// ProcessInternalGlobal - Analyze the specified global variable and optimize
2086 /// it if possible. If we make a change, return true.
2087 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
2088 Module::global_iterator &GVI) {
2089 SmallPtrSet<PHINode*, 16> PHIUsers;
2091 GV->removeDeadConstantUsers();
2093 if (GV->use_empty()) {
2094 DEBUG(errs() << "GLOBAL DEAD: " << *GV);
2095 GV->eraseFromParent();
2100 if (!AnalyzeGlobal(GV, GS, PHIUsers)) {
2102 cerr << "Global: " << *GV;
2103 cerr << " isLoaded = " << GS.isLoaded << "\n";
2104 cerr << " StoredType = ";
2105 switch (GS.StoredType) {
2106 case GlobalStatus::NotStored: cerr << "NEVER STORED\n"; break;
2107 case GlobalStatus::isInitializerStored: cerr << "INIT STORED\n"; break;
2108 case GlobalStatus::isStoredOnce: cerr << "STORED ONCE\n"; break;
2109 case GlobalStatus::isStored: cerr << "stored\n"; break;
2111 if (GS.StoredType == GlobalStatus::isStoredOnce && GS.StoredOnceValue)
2112 cerr << " StoredOnceValue = " << *GS.StoredOnceValue << "\n";
2113 if (GS.AccessingFunction && !GS.HasMultipleAccessingFunctions)
2114 cerr << " AccessingFunction = " << GS.AccessingFunction->getName()
2116 cerr << " HasMultipleAccessingFunctions = "
2117 << GS.HasMultipleAccessingFunctions << "\n";
2118 cerr << " HasNonInstructionUser = " << GS.HasNonInstructionUser<<"\n";
2122 // If this is a first class global and has only one accessing function
2123 // and this function is main (which we know is not recursive we can make
2124 // this global a local variable) we replace the global with a local alloca
2125 // in this function.
2127 // NOTE: It doesn't make sense to promote non single-value types since we
2128 // are just replacing static memory to stack memory.
2130 // If the global is in different address space, don't bring it to stack.
2131 if (!GS.HasMultipleAccessingFunctions &&
2132 GS.AccessingFunction && !GS.HasNonInstructionUser &&
2133 GV->getType()->getElementType()->isSingleValueType() &&
2134 GS.AccessingFunction->getName() == "main" &&
2135 GS.AccessingFunction->hasExternalLinkage() &&
2136 GV->getType()->getAddressSpace() == 0) {
2137 DEBUG(errs() << "LOCALIZING GLOBAL: " << *GV);
2138 Instruction* FirstI = GS.AccessingFunction->getEntryBlock().begin();
2139 const Type* ElemTy = GV->getType()->getElementType();
2140 // FIXME: Pass Global's alignment when globals have alignment
2141 AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), FirstI);
2142 if (!isa<UndefValue>(GV->getInitializer()))
2143 new StoreInst(GV->getInitializer(), Alloca, FirstI);
2145 GV->replaceAllUsesWith(Alloca);
2146 GV->eraseFromParent();
2151 // If the global is never loaded (but may be stored to), it is dead.
2154 DEBUG(errs() << "GLOBAL NEVER LOADED: " << *GV);
2156 // Delete any stores we can find to the global. We may not be able to
2157 // make it completely dead though.
2158 bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(),
2161 // If the global is dead now, delete it.
2162 if (GV->use_empty()) {
2163 GV->eraseFromParent();
2169 } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
2170 DEBUG(errs() << "MARKING CONSTANT: " << *GV);
2171 GV->setConstant(true);
2173 // Clean up any obviously simplifiable users now.
2174 CleanupConstantGlobalUsers(GV, GV->getInitializer(), GV->getContext());
2176 // If the global is dead now, just nuke it.
2177 if (GV->use_empty()) {
2178 DEBUG(errs() << " *** Marking constant allowed us to simplify "
2179 << "all users and delete global!\n");
2180 GV->eraseFromParent();
2186 } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
2187 if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
2188 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD,
2189 GV->getContext())) {
2190 GVI = FirstNewGV; // Don't skip the newly produced globals!
2193 } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
2194 // If the initial value for the global was an undef value, and if only
2195 // one other value was stored into it, we can just change the
2196 // initializer to be the stored value, then delete all stores to the
2197 // global. This allows us to mark it constant.
2198 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
2199 if (isa<UndefValue>(GV->getInitializer())) {
2200 // Change the initial value here.
2201 GV->setInitializer(SOVConstant);
2203 // Clean up any obviously simplifiable users now.
2204 CleanupConstantGlobalUsers(GV, GV->getInitializer(),
2207 if (GV->use_empty()) {
2208 DEBUG(errs() << " *** Substituting initializer allowed us to "
2209 << "simplify all users and delete global!\n");
2210 GV->eraseFromParent();
2219 // Try to optimize globals based on the knowledge that only one value
2220 // (besides its initializer) is ever stored to the global.
2221 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
2222 getAnalysisIfAvailable<TargetData>(),
2226 // Otherwise, if the global was not a boolean, we can shrink it to be a
2228 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
2229 if (TryToShrinkGlobalToBoolean(GV, SOVConstant, GV->getContext())) {
2238 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
2239 /// function, changing them to FastCC.
2240 static void ChangeCalleesToFastCall(Function *F) {
2241 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
2242 CallSite User(cast<Instruction>(*UI));
2243 User.setCallingConv(CallingConv::Fast);
2247 static AttrListPtr StripNest(const AttrListPtr &Attrs) {
2248 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
2249 if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0)
2252 // There can be only one.
2253 return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest);
2259 static void RemoveNestAttribute(Function *F) {
2260 F->setAttributes(StripNest(F->getAttributes()));
2261 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
2262 CallSite User(cast<Instruction>(*UI));
2263 User.setAttributes(StripNest(User.getAttributes()));
2267 bool GlobalOpt::OptimizeFunctions(Module &M) {
2268 bool Changed = false;
2269 // Optimize functions.
2270 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2272 // Functions without names cannot be referenced outside this module.
2273 if (!F->hasName() && !F->isDeclaration())
2274 F->setLinkage(GlobalValue::InternalLinkage);
2275 F->removeDeadConstantUsers();
2276 if (F->use_empty() && (F->hasLocalLinkage() ||
2277 F->hasLinkOnceLinkage())) {
2278 M.getFunctionList().erase(F);
2281 } else if (F->hasLocalLinkage()) {
2282 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
2283 !F->hasAddressTaken()) {
2284 // If this function has C calling conventions, is not a varargs
2285 // function, and is only called directly, promote it to use the Fast
2286 // calling convention.
2287 F->setCallingConv(CallingConv::Fast);
2288 ChangeCalleesToFastCall(F);
2293 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
2294 !F->hasAddressTaken()) {
2295 // The function is not used by a trampoline intrinsic, so it is safe
2296 // to remove the 'nest' attribute.
2297 RemoveNestAttribute(F);
2306 bool GlobalOpt::OptimizeGlobalVars(Module &M) {
2307 bool Changed = false;
2308 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
2310 GlobalVariable *GV = GVI++;
2311 // Global variables without names cannot be referenced outside this module.
2312 if (!GV->hasName() && !GV->isDeclaration())
2313 GV->setLinkage(GlobalValue::InternalLinkage);
2314 if (!GV->isConstant() && GV->hasLocalLinkage() &&
2315 GV->hasInitializer())
2316 Changed |= ProcessInternalGlobal(GV, GVI);
2321 /// FindGlobalCtors - Find the llvm.globalctors list, verifying that all
2322 /// initializers have an init priority of 65535.
2323 GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
2324 for (Module::global_iterator I = M.global_begin(), E = M.global_end();
2326 if (I->getName() == "llvm.global_ctors") {
2327 // Found it, verify it's an array of { int, void()* }.
2328 const ArrayType *ATy =dyn_cast<ArrayType>(I->getType()->getElementType());
2330 const StructType *STy = dyn_cast<StructType>(ATy->getElementType());
2331 if (!STy || STy->getNumElements() != 2 ||
2332 STy->getElementType(0) != Type::getInt32Ty(M.getContext())) return 0;
2333 const PointerType *PFTy = dyn_cast<PointerType>(STy->getElementType(1));
2334 if (!PFTy) return 0;
2335 const FunctionType *FTy = dyn_cast<FunctionType>(PFTy->getElementType());
2336 if (!FTy || FTy->getReturnType() != Type::getVoidTy(M.getContext()) ||
2337 FTy->isVarArg() || FTy->getNumParams() != 0)
2340 // Verify that the initializer is simple enough for us to handle.
2341 if (!I->hasDefinitiveInitializer()) return 0;
2342 ConstantArray *CA = dyn_cast<ConstantArray>(I->getInitializer());
2344 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
2345 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(*i)) {
2346 if (isa<ConstantPointerNull>(CS->getOperand(1)))
2349 // Must have a function or null ptr.
2350 if (!isa<Function>(CS->getOperand(1)))
2353 // Init priority must be standard.
2354 ConstantInt *CI = dyn_cast<ConstantInt>(CS->getOperand(0));
2355 if (!CI || CI->getZExtValue() != 65535)
2366 /// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand,
2367 /// return a list of the functions and null terminator as a vector.
2368 static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
2369 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
2370 std::vector<Function*> Result;
2371 Result.reserve(CA->getNumOperands());
2372 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
2373 ConstantStruct *CS = cast<ConstantStruct>(*i);
2374 Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
2379 /// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
2380 /// specified array, returning the new global to use.
2381 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
2382 const std::vector<Function*> &Ctors,
2383 LLVMContext &Context) {
2384 // If we made a change, reassemble the initializer list.
2385 std::vector<Constant*> CSVals;
2386 CSVals.push_back(ConstantInt::get(Type::getInt32Ty(Context), 65535));
2387 CSVals.push_back(0);
2389 // Create the new init list.
2390 std::vector<Constant*> CAList;
2391 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
2393 CSVals[1] = Ctors[i];
2395 const Type *FTy = FunctionType::get(Type::getVoidTy(Context), false);
2396 const PointerType *PFTy = PointerType::getUnqual(FTy);
2397 CSVals[1] = Constant::getNullValue(PFTy);
2398 CSVals[0] = ConstantInt::get(Type::getInt32Ty(Context), 2147483647);
2400 CAList.push_back(ConstantStruct::get(Context, CSVals, false));
2403 // Create the array initializer.
2404 const Type *StructTy =
2405 cast<ArrayType>(GCL->getType()->getElementType())->getElementType();
2406 Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
2407 CAList.size()), CAList);
2409 // If we didn't change the number of elements, don't create a new GV.
2410 if (CA->getType() == GCL->getInitializer()->getType()) {
2411 GCL->setInitializer(CA);
2415 // Create the new global and insert it next to the existing list.
2416 GlobalVariable *NGV = new GlobalVariable(Context, CA->getType(),
2418 GCL->getLinkage(), CA, "",
2419 GCL->isThreadLocal());
2420 GCL->getParent()->getGlobalList().insert(GCL, NGV);
2423 // Nuke the old list, replacing any uses with the new one.
2424 if (!GCL->use_empty()) {
2426 if (V->getType() != GCL->getType())
2427 V = ConstantExpr::getBitCast(V, GCL->getType());
2428 GCL->replaceAllUsesWith(V);
2430 GCL->eraseFromParent();
2439 static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues,
2441 if (Constant *CV = dyn_cast<Constant>(V)) return CV;
2442 Constant *R = ComputedValues[V];
2443 assert(R && "Reference to an uncomputed value!");
2447 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple
2448 /// enough for us to understand. In particular, if it is a cast of something,
2449 /// we punt. We basically just support direct accesses to globals and GEP's of
2450 /// globals. This should be kept up to date with CommitValueTo.
2451 static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext &Context) {
2452 // Conservatively, avoid aggregate types. This is because we don't
2453 // want to worry about them partially overlapping other stores.
2454 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
2457 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
2458 // Do not allow weak/linkonce/dllimport/dllexport linkage or
2459 // external globals.
2460 return GV->hasDefinitiveInitializer();
2462 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2463 // Handle a constantexpr gep.
2464 if (CE->getOpcode() == Instruction::GetElementPtr &&
2465 isa<GlobalVariable>(CE->getOperand(0)) &&
2466 cast<GEPOperator>(CE)->isInBounds()) {
2467 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2468 // Do not allow weak/linkonce/dllimport/dllexport linkage or
2469 // external globals.
2470 if (!GV->hasDefinitiveInitializer())
2473 // The first index must be zero.
2474 ConstantInt *CI = dyn_cast<ConstantInt>(*next(CE->op_begin()));
2475 if (!CI || !CI->isZero()) return false;
2477 // The remaining indices must be compile-time known integers within the
2478 // notional bounds of the corresponding static array types.
2479 if (!CE->isGEPWithNoNotionalOverIndexing())
2482 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2487 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global
2488 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it.
2489 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
2490 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2491 ConstantExpr *Addr, unsigned OpNo,
2492 LLVMContext &Context) {
2493 // Base case of the recursion.
2494 if (OpNo == Addr->getNumOperands()) {
2495 assert(Val->getType() == Init->getType() && "Type mismatch!");
2499 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
2500 std::vector<Constant*> Elts;
2502 // Break up the constant into its elements.
2503 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
2504 for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i)
2505 Elts.push_back(cast<Constant>(*i));
2506 } else if (isa<ConstantAggregateZero>(Init)) {
2507 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2508 Elts.push_back(Constant::getNullValue(STy->getElementType(i)));
2509 } else if (isa<UndefValue>(Init)) {
2510 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2511 Elts.push_back(UndefValue::get(STy->getElementType(i)));
2513 llvm_unreachable("This code is out of sync with "
2514 " ConstantFoldLoadThroughGEPConstantExpr");
2517 // Replace the element that we are supposed to.
2518 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2519 unsigned Idx = CU->getZExtValue();
2520 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2521 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1, Context);
2523 // Return the modified struct.
2524 return ConstantStruct::get(Context, &Elts[0], Elts.size(), STy->isPacked());
2526 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2527 const ArrayType *ATy = cast<ArrayType>(Init->getType());
2529 // Break up the array into elements.
2530 std::vector<Constant*> Elts;
2531 if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
2532 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
2533 Elts.push_back(cast<Constant>(*i));
2534 } else if (isa<ConstantAggregateZero>(Init)) {
2535 Constant *Elt = Constant::getNullValue(ATy->getElementType());
2536 Elts.assign(ATy->getNumElements(), Elt);
2537 } else if (isa<UndefValue>(Init)) {
2538 Constant *Elt = UndefValue::get(ATy->getElementType());
2539 Elts.assign(ATy->getNumElements(), Elt);
2541 llvm_unreachable("This code is out of sync with "
2542 " ConstantFoldLoadThroughGEPConstantExpr");
2545 assert(CI->getZExtValue() < ATy->getNumElements());
2546 Elts[CI->getZExtValue()] =
2547 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1, Context);
2548 return ConstantArray::get(ATy, Elts);
2552 /// CommitValueTo - We have decided that Addr (which satisfies the predicate
2553 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2554 static void CommitValueTo(Constant *Val, Constant *Addr,
2555 LLVMContext &Context) {
2556 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2557 assert(GV->hasInitializer());
2558 GV->setInitializer(Val);
2562 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2563 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2565 Constant *Init = GV->getInitializer();
2566 Init = EvaluateStoreInto(Init, Val, CE, 2, Context);
2567 GV->setInitializer(Init);
2570 /// ComputeLoadResult - Return the value that would be computed by a load from
2571 /// P after the stores reflected by 'memory' have been performed. If we can't
2572 /// decide, return null.
2573 static Constant *ComputeLoadResult(Constant *P,
2574 const DenseMap<Constant*, Constant*> &Memory,
2575 LLVMContext &Context) {
2576 // If this memory location has been recently stored, use the stored value: it
2577 // is the most up-to-date.
2578 DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
2579 if (I != Memory.end()) return I->second;
2582 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
2583 if (GV->hasDefinitiveInitializer())
2584 return GV->getInitializer();
2588 // Handle a constantexpr getelementptr.
2589 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
2590 if (CE->getOpcode() == Instruction::GetElementPtr &&
2591 isa<GlobalVariable>(CE->getOperand(0))) {
2592 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2593 if (GV->hasDefinitiveInitializer())
2594 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2597 return 0; // don't know how to evaluate.
2600 /// EvaluateFunction - Evaluate a call to function F, returning true if
2601 /// successful, false if we can't evaluate it. ActualArgs contains the formal
2602 /// arguments for the function.
2603 static bool EvaluateFunction(Function *F, Constant *&RetVal,
2604 const SmallVectorImpl<Constant*> &ActualArgs,
2605 std::vector<Function*> &CallStack,
2606 DenseMap<Constant*, Constant*> &MutatedMemory,
2607 std::vector<GlobalVariable*> &AllocaTmps) {
2608 // Check to see if this function is already executing (recursion). If so,
2609 // bail out. TODO: we might want to accept limited recursion.
2610 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
2613 LLVMContext &Context = F->getContext();
2615 CallStack.push_back(F);
2617 /// Values - As we compute SSA register values, we store their contents here.
2618 DenseMap<Value*, Constant*> Values;
2620 // Initialize arguments to the incoming values specified.
2622 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2624 Values[AI] = ActualArgs[ArgNo];
2626 /// ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
2627 /// we can only evaluate any one basic block at most once. This set keeps
2628 /// track of what we have executed so we can detect recursive cases etc.
2629 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
2631 // CurInst - The current instruction we're evaluating.
2632 BasicBlock::iterator CurInst = F->begin()->begin();
2634 // This is the main evaluation loop.
2636 Constant *InstResult = 0;
2638 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
2639 if (SI->isVolatile()) return false; // no volatile accesses.
2640 Constant *Ptr = getVal(Values, SI->getOperand(1));
2641 if (!isSimpleEnoughPointerToCommit(Ptr, Context))
2642 // If this is too complex for us to commit, reject it.
2644 Constant *Val = getVal(Values, SI->getOperand(0));
2645 MutatedMemory[Ptr] = Val;
2646 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
2647 InstResult = ConstantExpr::get(BO->getOpcode(),
2648 getVal(Values, BO->getOperand(0)),
2649 getVal(Values, BO->getOperand(1)));
2650 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
2651 InstResult = ConstantExpr::getCompare(CI->getPredicate(),
2652 getVal(Values, CI->getOperand(0)),
2653 getVal(Values, CI->getOperand(1)));
2654 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
2655 InstResult = ConstantExpr::getCast(CI->getOpcode(),
2656 getVal(Values, CI->getOperand(0)),
2658 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
2660 ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
2661 getVal(Values, SI->getOperand(1)),
2662 getVal(Values, SI->getOperand(2)));
2663 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
2664 Constant *P = getVal(Values, GEP->getOperand(0));
2665 SmallVector<Constant*, 8> GEPOps;
2666 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
2668 GEPOps.push_back(getVal(Values, *i));
2669 InstResult = cast<GEPOperator>(GEP)->isInBounds() ?
2670 ConstantExpr::getInBoundsGetElementPtr(P, &GEPOps[0], GEPOps.size()) :
2671 ConstantExpr::getGetElementPtr(P, &GEPOps[0], GEPOps.size());
2672 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
2673 if (LI->isVolatile()) return false; // no volatile accesses.
2674 InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
2675 MutatedMemory, Context);
2676 if (InstResult == 0) return false; // Could not evaluate load.
2677 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
2678 if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
2679 const Type *Ty = AI->getType()->getElementType();
2680 AllocaTmps.push_back(new GlobalVariable(Context, Ty, false,
2681 GlobalValue::InternalLinkage,
2682 UndefValue::get(Ty),
2684 InstResult = AllocaTmps.back();
2685 } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {
2687 // Debug info can safely be ignored here.
2688 if (isa<DbgInfoIntrinsic>(CI)) {
2693 // Cannot handle inline asm.
2694 if (isa<InlineAsm>(CI->getOperand(0))) return false;
2696 // Resolve function pointers.
2697 Function *Callee = dyn_cast<Function>(getVal(Values, CI->getOperand(0)));
2698 if (!Callee) return false; // Cannot resolve.
2700 SmallVector<Constant*, 8> Formals;
2701 for (User::op_iterator i = CI->op_begin() + 1, e = CI->op_end();
2703 Formals.push_back(getVal(Values, *i));
2705 if (Callee->isDeclaration()) {
2706 // If this is a function we can constant fold, do it.
2707 if (Constant *C = ConstantFoldCall(Callee, Formals.data(),
2714 if (Callee->getFunctionType()->isVarArg())
2718 // Execute the call, if successful, use the return value.
2719 if (!EvaluateFunction(Callee, RetVal, Formals, CallStack,
2720 MutatedMemory, AllocaTmps))
2722 InstResult = RetVal;
2724 } else if (isa<TerminatorInst>(CurInst)) {
2725 BasicBlock *NewBB = 0;
2726 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
2727 if (BI->isUnconditional()) {
2728 NewBB = BI->getSuccessor(0);
2731 dyn_cast<ConstantInt>(getVal(Values, BI->getCondition()));
2732 if (!Cond) return false; // Cannot determine.
2734 NewBB = BI->getSuccessor(!Cond->getZExtValue());
2736 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
2738 dyn_cast<ConstantInt>(getVal(Values, SI->getCondition()));
2739 if (!Val) return false; // Cannot determine.
2740 NewBB = SI->getSuccessor(SI->findCaseValue(Val));
2741 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
2742 if (RI->getNumOperands())
2743 RetVal = getVal(Values, RI->getOperand(0));
2745 CallStack.pop_back(); // return from fn.
2746 return true; // We succeeded at evaluating this ctor!
2748 // invoke, unwind, unreachable.
2749 return false; // Cannot handle this terminator.
2752 // Okay, we succeeded in evaluating this control flow. See if we have
2753 // executed the new block before. If so, we have a looping function,
2754 // which we cannot evaluate in reasonable time.
2755 if (!ExecutedBlocks.insert(NewBB))
2756 return false; // looped!
2758 // Okay, we have never been in this block before. Check to see if there
2759 // are any PHI nodes. If so, evaluate them with information about where
2761 BasicBlock *OldBB = CurInst->getParent();
2762 CurInst = NewBB->begin();
2764 for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
2765 Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB));
2767 // Do NOT increment CurInst. We know that the terminator had no value.
2770 // Did not know how to evaluate this!
2774 if (!CurInst->use_empty())
2775 Values[CurInst] = InstResult;
2777 // Advance program counter.
2782 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if
2783 /// we can. Return true if we can, false otherwise.
2784 static bool EvaluateStaticConstructor(Function *F) {
2785 /// MutatedMemory - For each store we execute, we update this map. Loads
2786 /// check this to get the most up-to-date value. If evaluation is successful,
2787 /// this state is committed to the process.
2788 DenseMap<Constant*, Constant*> MutatedMemory;
2790 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
2791 /// to represent its body. This vector is needed so we can delete the
2792 /// temporary globals when we are done.
2793 std::vector<GlobalVariable*> AllocaTmps;
2795 /// CallStack - This is used to detect recursion. In pathological situations
2796 /// we could hit exponential behavior, but at least there is nothing
2798 std::vector<Function*> CallStack;
2800 // Call the function.
2801 Constant *RetValDummy;
2802 bool EvalSuccess = EvaluateFunction(F, RetValDummy,
2803 SmallVector<Constant*, 0>(), CallStack,
2804 MutatedMemory, AllocaTmps);
2806 // We succeeded at evaluation: commit the result.
2807 DEBUG(errs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2808 << F->getName() << "' to " << MutatedMemory.size()
2810 for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
2811 E = MutatedMemory.end(); I != E; ++I)
2812 CommitValueTo(I->second, I->first, F->getContext());
2815 // At this point, we are done interpreting. If we created any 'alloca'
2816 // temporaries, release them now.
2817 while (!AllocaTmps.empty()) {
2818 GlobalVariable *Tmp = AllocaTmps.back();
2819 AllocaTmps.pop_back();
2821 // If there are still users of the alloca, the program is doing something
2822 // silly, e.g. storing the address of the alloca somewhere and using it
2823 // later. Since this is undefined, we'll just make it be null.
2824 if (!Tmp->use_empty())
2825 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
2834 /// OptimizeGlobalCtorsList - Simplify and evaluation global ctors if possible.
2835 /// Return true if anything changed.
2836 bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
2837 std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
2838 bool MadeChange = false;
2839 if (Ctors.empty()) return false;
2841 // Loop over global ctors, optimizing them when we can.
2842 for (unsigned i = 0; i != Ctors.size(); ++i) {
2843 Function *F = Ctors[i];
2844 // Found a null terminator in the middle of the list, prune off the rest of
2847 if (i != Ctors.size()-1) {
2854 // We cannot simplify external ctor functions.
2855 if (F->empty()) continue;
2857 // If we can evaluate the ctor at compile time, do.
2858 if (EvaluateStaticConstructor(F)) {
2859 Ctors.erase(Ctors.begin()+i);
2862 ++NumCtorsEvaluated;
2867 if (!MadeChange) return false;
2869 GCL = InstallGlobalCtors(GCL, Ctors, GCL->getContext());
2873 bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
2874 bool Changed = false;
2876 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
2878 Module::alias_iterator J = I++;
2879 // Aliases without names cannot be referenced outside this module.
2880 if (!J->hasName() && !J->isDeclaration())
2881 J->setLinkage(GlobalValue::InternalLinkage);
2882 // If the aliasee may change at link time, nothing can be done - bail out.
2883 if (J->mayBeOverridden())
2886 Constant *Aliasee = J->getAliasee();
2887 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2888 Target->removeDeadConstantUsers();
2889 bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse();
2891 // Make all users of the alias use the aliasee instead.
2892 if (!J->use_empty()) {
2893 J->replaceAllUsesWith(Aliasee);
2894 ++NumAliasesResolved;
2898 // If the aliasee has internal linkage, give it the name and linkage
2899 // of the alias, and delete the alias. This turns:
2900 // define internal ... @f(...)
2901 // @a = alias ... @f
2903 // define ... @a(...)
2904 if (!Target->hasLocalLinkage())
2907 // The transform is only useful if the alias does not have internal linkage.
2908 if (J->hasLocalLinkage())
2911 // Do not perform the transform if multiple aliases potentially target the
2912 // aliasee. This check also ensures that it is safe to replace the section
2913 // and other attributes of the aliasee with those of the alias.
2917 // Give the aliasee the name, linkage and other attributes of the alias.
2918 Target->takeName(J);
2919 Target->setLinkage(J->getLinkage());
2920 Target->GlobalValue::copyAttributesFrom(J);
2922 // Delete the alias.
2923 M.getAliasList().erase(J);
2924 ++NumAliasesRemoved;
2931 bool GlobalOpt::runOnModule(Module &M) {
2932 bool Changed = false;
2934 // Try to find the llvm.globalctors list.
2935 GlobalVariable *GlobalCtors = FindGlobalCtors(M);
2937 bool LocalChange = true;
2938 while (LocalChange) {
2939 LocalChange = false;
2941 // Delete functions that are trivially dead, ccc -> fastcc
2942 LocalChange |= OptimizeFunctions(M);
2944 // Optimize global_ctors list.
2946 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);
2948 // Optimize non-address-taken globals.
2949 LocalChange |= OptimizeGlobalVars(M);
2951 // Resolve aliases, when possible.
2952 LocalChange |= OptimizeGlobalAliases(M);
2953 Changed |= LocalChange;
2956 // TODO: Move all global ctors functions to the end of the module for code