//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumFnDeleted , "Number of functions deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
namespace {
struct GlobalStatus;

struct GlobalOpt : public ModulePass {
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<TargetLibraryInfo>();
  }
  static char ID; // Pass identification, replacement for typeid
  GlobalOpt() : ModulePass(ID) {
    initializeGlobalOptPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M);

private:
  GlobalVariable *FindGlobalCtors(Module &M);
  bool OptimizeFunctions(Module &M);
  bool OptimizeGlobalVars(Module &M);
  bool OptimizeGlobalAliases(Module &M);
  bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
  bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
  bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                             const SmallPtrSet<const PHINode*, 16> &PHIUsers,
                             const GlobalStatus &GS);
  bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
};
}
char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
namespace {

/// GlobalStatus - As we analyze each global, keep track of some information
/// about it.  If we find out that the address of the global is taken, none of
/// this info will be accurate.
struct GlobalStatus {
  /// isCompared - True if the global's address is used in a comparison.
  bool isCompared;

  /// isLoaded - True if the global is ever loaded.  If the global isn't ever
  /// loaded it can be deleted.
  bool isLoaded;

  /// StoredType - Keep track of what stores to the global look like.
  ///
  enum StoredType {
    /// NotStored - There is no store to this global.  It can thus be marked
    /// constant.
    NotStored,

    /// isInitializerStored - This global is stored to, but the only thing
    /// stored is the constant it was initialized with.  This is only tracked
    /// for scalar globals.
    isInitializerStored,

    /// isStoredOnce - This global is stored to, but only its initializer and
    /// one other value is ever stored to it.  If this global isStoredOnce, we
    /// track the value stored to it in StoredOnceValue below.  This is only
    /// tracked for scalar globals.
    isStoredOnce,

    /// isStored - This global is stored to by multiple values or something else
    /// that we cannot track.
    isStored
  } StoredType;

  /// StoredOnceValue - If only one value (besides the initializer constant) is
  /// ever stored to this global, keep track of what value it is.
  Value *StoredOnceValue;

  /// AccessingFunction/HasMultipleAccessingFunctions - These start out
  /// null/false.  When the first accessing function is noticed, it is recorded.
  /// When a second different accessing function is noticed,
  /// HasMultipleAccessingFunctions is set to true.
  const Function *AccessingFunction;
  bool HasMultipleAccessingFunctions;

  /// HasNonInstructionUser - Set to true if this global has a user that is not
  /// an instruction (e.g. a constant expr or GV initializer).
  bool HasNonInstructionUser;

  /// HasPHIUser - Set to true if this global has a user that is a PHI node.
  bool HasPHIUser;

  GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
                   StoredOnceValue(0), AccessingFunction(0),
                   HasMultipleAccessingFunctions(false),
                   HasNonInstructionUser(false), HasPHIUser(false) {}
};

}
// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
// by constants itself.  Note that constants cannot be cyclic, so this test is
// pretty easy to implement recursively.
//
static bool SafeToDestroyConstant(const Constant *C) {
  if (isa<GlobalValue>(C)) return false;

  for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
       ++UI)
    if (const Constant *CU = dyn_cast<Constant>(*UI)) {
      if (!SafeToDestroyConstant(CU)) return false;
    } else
      return false;
  return true;
}
/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus
/// structure.  If the global has its address taken, return true to indicate we
/// can't do anything with it.
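///
/// For illustration (hypothetical IR, not from a specific test case): a global
/// whose only uses are
///     store i32 1, i32* @g
///     %v = load i32* @g
/// can be analyzed, but a use that passes the address away, such as
///     call void @f(i32* @g)
/// counts as taking the address, so the analysis returns true and the global is
/// left alone.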
static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
                          SmallPtrSet<const PHINode*, 16> &PHIUsers) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;
    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      GS.HasNonInstructionUser = true;

      // If the result of the constantexpr isn't pointer type, then we won't
      // know to expect it in various places.  Just reject early.
      if (!isa<PointerType>(CE->getType())) return true;

      if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
      if (!GS.HasMultipleAccessingFunctions) {
        const Function *F = I->getParent()->getParent();
        if (GS.AccessingFunction == 0)
          GS.AccessingFunction = F;
        else if (GS.AccessingFunction != F)
          GS.HasMultipleAccessingFunctions = true;
      }
      if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
        GS.isLoaded = true;
        // Don't hack on volatile/atomic loads.
        if (!LI->isSimple()) return true;
      } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
        // Don't allow a store OF the address, only stores TO the address.
        if (SI->getOperand(0) == V) return true;

        // Don't hack on volatile/atomic stores.
        if (!SI->isSimple()) return true;

        // If this is a direct store to the global (i.e., the global is a scalar
        // value, not an aggregate), keep more specific information about
        // stores.
        if (GS.StoredType != GlobalStatus::isStored) {
          if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(
                                                           SI->getOperand(1))) {
            Value *StoredVal = SI->getOperand(0);
            if (StoredVal == GV->getInitializer()) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (isa<LoadInst>(StoredVal) &&
                       cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (GS.StoredType < GlobalStatus::isStoredOnce) {
              GS.StoredType = GlobalStatus::isStoredOnce;
              GS.StoredOnceValue = StoredVal;
            } else if (GS.StoredType == GlobalStatus::isStoredOnce &&
                       GS.StoredOnceValue == StoredVal) {
              // noop.
            } else {
              GS.StoredType = GlobalStatus::isStored;
            }
          } else {
            GS.StoredType = GlobalStatus::isStored;
          }
        }
      } else if (isa<GetElementPtrInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<SelectInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
        // PHI nodes we can check just like select or GEP instructions, but we
        // have to be careful about infinite recursion.
        if (PHIUsers.insert(PN))  // Not already visited.
          if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
        GS.HasPHIUser = true;
      } else if (isa<CmpInst>(I)) {
        GS.isCompared = true;
      } else if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
        if (MTI->isVolatile()) return true;
        if (MTI->getArgOperand(0) == V)
          GS.StoredType = GlobalStatus::isStored;
        if (MTI->getArgOperand(1) == V)
          GS.isLoaded = true;
      } else if (const MemSetInst *MSI = dyn_cast<MemSetInst>(I)) {
        assert(MSI->getArgOperand(0) == V && "Memset only takes one pointer!");
        if (MSI->isVolatile()) return true;
        GS.StoredType = GlobalStatus::isStored;
      } else {
        return true;  // Any other non-load instruction might take address!
      }
    } else if (const Constant *C = dyn_cast<Constant>(U)) {
      GS.HasNonInstructionUser = true;
      // We might have a dead and dangling constant hanging off of here.
      if (!SafeToDestroyConstant(C))
        return true;
    } else {
      GS.HasNonInstructionUser = true;
      // Otherwise must be some other user.
      return true;
    }
  }

  return false;
}
static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
  if (!CI) return 0;
  unsigned IdxV = CI->getZExtValue();

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) {
    if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV);
  } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) {
    if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV);
  } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
    if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
  } else if (isa<ConstantAggregateZero>(Agg)) {
    if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return Constant::getNullValue(STy->getElementType(IdxV));
    } else if (SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return Constant::getNullValue(STy->getElementType());
    }
  } else if (isa<UndefValue>(Agg)) {
    if (StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return UndefValue::get(STy->getElementType(IdxV));
    } else if (SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return UndefValue::get(STy->getElementType());
    }
  }
  return 0;
}
/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
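///
/// For example (illustrative only): once @g is known to hold the constant 42,
/// a user such as
///     %x = load i32* @g
/// is replaced by 42 and erased, and any store to @g must be unreachable and is
/// simply deleted.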
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
    User *U = *UI++;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit);
      } else if (CE->getOpcode() == Instruction::BitCast &&
                 CE->getType()->isPointerTy()) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        CleanupConstantGlobalUsers(V, Init);
        return true;
      }
    }
  }
  return Changed;
}
/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return SafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}
/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // constant value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;

  return true;
}
/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;

  return true;
}
/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable.  This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way.  We have determined that
/// this transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
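///
/// Illustrative example (hypothetical IR, not from the original source): an
/// internal global
///     @g = internal global { i32, double } zeroinitializer
/// whose only uses are "gep @g, 0, 0" and "gep @g, 0, 1" is split into
///     @g.0 = internal global i32 0
///     @g.1 = internal global double 0.0
/// and the GEPs are rewritten to reference @g.0 and @g.1 directly.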
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = TD.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *TD.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                    ConstantInt::get(Type::getInt32Ty(STy->getContext()), i));
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                    ConstantInt::get(Type::getInt32Ty(Init->getContext()), i));
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps,
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->use_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the 1th operand, which has to be zero or else the program is quite
    // broken (undefined).  Get the 2nd operand, which is the structure or array
    // index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs,
                                           GEPI->getName()+"."+Twine(Val),GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null.  PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
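///
/// For example (hypothetical IR): if the only uses of %p are
///     %v = load i32* %p
///     call void %p()
/// then a null %p would trap on either use; a use such as "store i32* %p, ..."
/// (storing the pointer itself) would not trap and makes this return false.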
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;

    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(UI->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}
/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null.  Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    const User *U = *UI;
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  }
  return true;
}
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->use_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                          ConstantExpr::getGetElementPtr(NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}
/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it.  If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they can never be
/// reached with a null value, so we can optimize away the load.
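///
/// Sketch of the effect (illustrative only): if @g is null-initialized and the
/// single value ever stored to it is @f, then
///     %p = load void()** @g
///     call void %p()
/// can be rewritten to "call void @f()", since a null %p would have trapped.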
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
    CleanupConstantGlobalUsers(GV, 0);
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
      Changed = true;
    }
  }
  return Changed;
}
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
      if (Constant *NewC = ConstantFoldInstruction(I)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}
/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc.  Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc.  Instead, turn the
/// malloc into a global, and rewrite any loads of GV as uses of the new global.
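///
/// Illustrative example (hypothetical IR): for
///     @g = internal global i32* null
///     store i32* %m, i32** @g        ; %m is the malloc result, stored once
/// the malloc is replaced by
///     @g.body = internal global i32 undef
/// and loads of @g become uses of @g.body (with an extra @g.init boolean if the
/// loaded pointer was compared against null).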
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     Type *AllocTy,
                                                     ConstantInt *NElements,
                                                     TargetData *TD) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->isThreadLocal());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = 0;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->use_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (TheBC == 0)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->isThreadLocal());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->use_back());
    while (!LI->use_empty()) {
      Use &LoadUse = LI->use_begin().getUse();
      if (!isa<ICmpInst>(LoadUse.getUser())) {
        LoadUse = RepValue;
        continue;
      }

      ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
      // Replace the cmp X, 0 with a use of the bool value.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->use_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc..
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue);

  return NewGV;
}
/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V.  We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                          const GlobalVariable *GV,
                                          SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const Instruction *Inst = cast<Instruction>(*UI);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}
/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere.  Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer.  Further, delete the store into
/// GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
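///
/// For example (illustrative only), a use "%v = load i8* %alloc" becomes
///     %tmp = load i8** @GV
///     %v = load i8* %tmp
/// and the original "store i8* %alloc, i8** @GV" is simply deleted.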
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->use_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}
/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on.  This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const Instruction *User = cast<Instruction>(*UI);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(User)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or the stored value itself.
  for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
       , E = LoadUsingPHIs.end(); I != E; ++I) {
    const PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.
    StructType *ST =
      cast<StructType>(cast<PointerType>(PN->getType())->getElementType());

    PHINode *NewPN =
      PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
    Result = 0;
  }

  return FieldVals[FieldNo] = Result;
}
/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
             DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}
/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global.  Ptr
/// is a value loaded from the global.  Eliminate all uses of Ptr, making them
/// use FieldGlobals instead.  All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures.  Break
/// it up into multiple allocations of arrays of the fields.
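///
/// Sketch of the transformation (illustrative only): for a malloc of an array
/// of { i32, double } stored once into @g, one malloc and one global is emitted
/// per field,
///     @g.f0 = internal global i32* null
///     @g.f1 = internal global double* null
/// and GEP/load chains through @g are rewritten to go through the per-field
/// globals, which later enables scalar optimizations on each field.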
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, TargetData *TD) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::getUnqual(FieldTy);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Constant::getNullValue(PFieldTy),
                         GV->getName() + ".f" + Twine(FieldNo), GV,
                         GV->isThreadLocal());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, 0,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// InsertedScalarizedLoads - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}
/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored into it that is a malloc
/// or cast of malloc.
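///
/// Two transformations are attempted below: for a small, fixed-size allocation
/// the malloc is folded into a global body (OptimizeGlobalAddressOfMalloc); for
/// an allocation of an array of structs whose uses are simple enough, the
/// malloc is split into one malloc per field (PerformHeapAllocSRoA).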
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               CallInst *CI,
                                               Type *AllocTy,
                                               Module::global_iterator &GVI,
                                               TargetData *TD) {
  if (!TD)
    return false;

  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded setcc'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine malloc array size.
  Value *NElems = getMallocArraySize(CI, TD, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
      GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If the structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs.  malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
      Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
      unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
                                                   AllocSize, NumElements,
                                                   0, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      CI = dyn_cast<BitCastInst>(Malloc) ?
        extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
    }

    GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
    return true;
  }

  return false;
}
1580 // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
1581 // that only one value (besides its initializer) is ever stored to the global.
1582 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1583 Module::global_iterator &GVI,
1584 TargetData *TD) {
1585 // Ignore no-op GEPs and bitcasts.
1586 StoredOnceVal = StoredOnceVal->stripPointerCasts();
1588 // If we are dealing with a pointer global that is initialized to null and
1589 // only has one (non-null) value stored into it, then we can optimize any
1590 // users of the loaded value (often calls and loads) that would trap if the
1591 // loaded value is dynamically null.
1592 if (GV->getInitializer()->getType()->isPointerTy() &&
1593 GV->getInitializer()->isNullValue()) {
1594 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1595 if (GV->getInitializer()->getType() != SOVC->getType())
1596 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1598 // Optimize away any trapping uses of the loaded value.
1599 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
1600 return true;
1601 } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
1602 Type* MallocType = getMallocAllocatedType(CI);
1603 if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
1604 GVI, TD))
1605 return true;
1606 }
1607 }
1609 return false;
1610 }
1612 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
1613 /// two values ever stored into GV are its initializer and OtherVal. See if we
1614 /// can shrink the global into a boolean and select between the two values
1615 /// whenever it is used. This exposes the values to other scalar optimizations.
1616 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1617 Type *GVElType = GV->getType()->getElementType();
1619 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1620 // an FP value, pointer or vector, don't do this optimization because a select
1621 // between them is very expensive and unlikely to lead to later
1622 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1623 // where v1 and v2 both require constant pool loads, a big loss.
1624 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1625 GVElType->isFloatingPointTy() ||
1626 GVElType->isPointerTy() || GVElType->isVectorTy())
1627 return false;
1629 // Walk the use list of the global seeing if all the uses are load or store.
1630 // If there is anything else, bail out.
1631 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
1632 User *U = *I;
1633 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1634 return false;
1635 }
1637 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
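// Rough before/after of this rewrite (example IR only, names invented):
//   @G = internal global i32 0            ; only 0 (init) and 42 ever stored
// becomes
//   @G.b = internal global i1 false
// stores of 42 become "store i1 true", stores of the initializer store false,
// and every load turns into a load of the i1 followed by a select between the
// two original values (or a zext when the pair is just 0 and 1).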
1639 // Create the new global, initializing it to false.
1640 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1641 false,
1642 GlobalValue::InternalLinkage,
1643 ConstantInt::getFalse(GV->getContext()),
1644 GV->getName()+".b",
1645 GV->isThreadLocal());
1646 GV->getParent()->getGlobalList().insert(GV, NewGV);
1648 Constant *InitVal = GV->getInitializer();
1649 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1650 "No reason to shrink to bool!");
1652 // If initialized to zero and storing one into the global, we can use a cast
1653 // instead of a select to synthesize the desired value.
1654 bool IsOneZero = false;
1655 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
1656 IsOneZero = InitVal->isNullValue() && CI->isOne();
1658 while (!GV->use_empty()) {
1659 Instruction *UI = cast<Instruction>(GV->use_back());
1660 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1661 // Change the store into a boolean store.
1662 bool StoringOther = SI->getOperand(0) == OtherVal;
1663 // Only do this if we weren't storing a loaded value.
1664 Value *StoreVal;
1665 if (StoringOther || SI->getOperand(0) == InitVal)
1666 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1667 StoringOther);
1668 else {
1669 // Otherwise, we are storing a previously loaded copy. To do this,
1670 // change the copy from copying the original value to just copying the
1671 // bool.
1672 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1674 // If we've already replaced the input, StoredVal will be a cast or
1675 // select instruction. If not, it will be a load of the original
1676 // global.
1677 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1678 assert(LI->getOperand(0) == GV && "Not a copy!");
1679 // Insert a new load, to preserve the saved value.
1680 StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI);
1681 } else {
1682 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1683 "This is not a form that we understand!");
1684 StoreVal = StoredVal->getOperand(0);
1685 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1686 }
1687 }
1688 new StoreInst(StoreVal, NewGV, SI);
1689 } else {
1690 // Change the load into a load of bool then a select.
1691 LoadInst *LI = cast<LoadInst>(UI);
1692 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
1693 Value *NSI;
1694 if (IsOneZero)
1695 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1696 else
1697 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1698 NSI->takeName(LI);
1699 LI->replaceAllUsesWith(NSI);
1700 }
1701 UI->eraseFromParent();
1702 }
1704 GV->eraseFromParent();
1705 return true;
1706 }
1709 /// ProcessGlobal - Analyze the specified global variable and optimize it if
1710 /// possible. If we make a change, return true.
1711 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
1712 Module::global_iterator &GVI) {
1713 if (!GV->hasLocalLinkage())
1714 return false;
1716 // Do more involved optimizations if the global is internal.
1717 GV->removeDeadConstantUsers();
1719 if (GV->use_empty()) {
1720 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
1721 GV->eraseFromParent();
1722 ++NumDeleted;
1723 return true;
1724 }
1726 SmallPtrSet<const PHINode*, 16> PHIUsers;
1727 GlobalStatus GS;
1729 if (AnalyzeGlobal(GV, GS, PHIUsers))
1730 return false;
1732 if (!GS.isCompared && !GV->hasUnnamedAddr()) {
1733 GV->setUnnamedAddr(true);
1734 ++NumUnnamed;
1735 }
1737 if (GV->isConstant() || !GV->hasInitializer())
1738 return false;
1740 return ProcessInternalGlobal(GV, GVI, PHIUsers, GS);
1741 }
1743 /// ProcessInternalGlobal - Analyze the specified global variable and optimize
1744 /// it if possible. If we make a change, return true.
1745 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
1746 Module::global_iterator &GVI,
1747 const SmallPtrSet<const PHINode*, 16> &PHIUsers,
1748 const GlobalStatus &GS) {
1749 // If this is a first class global and has only one accessing function
1750 // and this function is main (which we know is not recursive), we can make
1751 // this global a local variable by replacing the global with a local alloca
1752 // in this function.
1754 // NOTE: It doesn't make sense to promote non single-value types since we
1755 // are just replacing static memory with stack memory.
1757 // If the global is in different address space, don't bring it to stack.
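// As a hypothetical example, an i32 global that is only read and written from
// main() becomes an "alloca i32" in main's entry block, with the old
// initializer stored into the alloca once at function entry (see the code
// below).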
1758 if (!GS.HasMultipleAccessingFunctions &&
1759 GS.AccessingFunction && !GS.HasNonInstructionUser &&
1760 GV->getType()->getElementType()->isSingleValueType() &&
1761 GS.AccessingFunction->getName() == "main" &&
1762 GS.AccessingFunction->hasExternalLinkage() &&
1763 GV->getType()->getAddressSpace() == 0) {
1764 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
1765 Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1766 ->getEntryBlock().begin());
1767 Type* ElemTy = GV->getType()->getElementType();
1768 // FIXME: Pass Global's alignment when globals have alignment
1769 AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
1770 if (!isa<UndefValue>(GV->getInitializer()))
1771 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1773 GV->replaceAllUsesWith(Alloca);
1774 GV->eraseFromParent();
1775 ++NumLocalized;
1776 return true;
1777 }
1779 // If the global is never loaded (but may be stored to), it is dead.
1780 // Delete it now.
1781 if (!GS.isLoaded) {
1782 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
1784 // Delete any stores we can find to the global. We may not be able to
1785 // make it completely dead though.
1786 bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());
1788 // If the global is dead now, delete it.
1789 if (GV->use_empty()) {
1790 GV->eraseFromParent();
1791 ++NumDeleted;
1792 Changed = true;
1793 }
1794 return Changed;
1796 } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
1797 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV);
1798 GV->setConstant(true);
1799 ++NumMarked;
1800 // Clean up any obviously simplifiable users now.
1801 CleanupConstantGlobalUsers(GV, GV->getInitializer());
1803 // If the global is dead now, just nuke it.
1804 if (GV->use_empty()) {
1805 DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
1806 << "all users and delete global!\n");
1807 GV->eraseFromParent();
1808 ++NumDeleted;
1809 }
1811 return true;
1813 } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
1814 if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
1815 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
1816 GVI = FirstNewGV; // Don't skip the newly produced globals!
1817 return true;
1818 }
1819 } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
1820 // If the initial value for the global was an undef value, and if only
1821 // one other value was stored into it, we can just change the
1822 // initializer to be the stored value, then delete all stores to the
1823 // global. This allows us to mark it constant.
1824 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
1825 if (isa<UndefValue>(GV->getInitializer())) {
1826 // Change the initial value here.
1827 GV->setInitializer(SOVConstant);
1829 // Clean up any obviously simplifiable users now.
1830 CleanupConstantGlobalUsers(GV, GV->getInitializer());
1832 if (GV->use_empty()) {
1833 DEBUG(dbgs() << " *** Substituting initializer allowed us to "
1834 << "simplify all users and delete global!\n");
1835 GV->eraseFromParent();
1836 ++NumDeleted;
1837 } else {
1838 GVI = GV;
1839 }
1840 ++NumSubstitute;
1841 return true;
1842 }
1844 // Try to optimize globals based on the knowledge that only one value
1845 // (besides its initializer) is ever stored to the global.
1846 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
1847 getAnalysisIfAvailable<TargetData>()))
1848 return true;
1850 // Otherwise, if the global was not a boolean, we can shrink it to be a
1851 // boolean.
1852 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
1853 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
1854 ++NumShrunkToBool;
1855 return true;
1856 }
1857 }
1859 return false;
1860 }
1862 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
1863 /// function, changing them to FastCC.
1864 static void ChangeCalleesToFastCall(Function *F) {
1865 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
1866 CallSite User(cast<Instruction>(*UI));
1867 User.setCallingConv(CallingConv::Fast);
1868 }
1869 }
1871 static AttrListPtr StripNest(const AttrListPtr &Attrs) {
1872 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
1873 if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0)
1874 continue;
1876 // There can be only one.
1877 return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest);
1878 }
1880 return Attrs;
1881 }
1883 static void RemoveNestAttribute(Function *F) {
1884 F->setAttributes(StripNest(F->getAttributes()));
1885 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
1886 CallSite User(cast<Instruction>(*UI));
1887 User.setAttributes(StripNest(User.getAttributes()));
1888 }
1889 }
1891 bool GlobalOpt::OptimizeFunctions(Module &M) {
1892 bool Changed = false;
1893 // Optimize functions.
1894 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
1895 Function *F = FI++;
1896 // Functions without names cannot be referenced outside this module.
1897 if (!F->hasName() && !F->isDeclaration())
1898 F->setLinkage(GlobalValue::InternalLinkage);
1899 F->removeDeadConstantUsers();
1900 if (F->isDefTriviallyDead()) {
1901 F->eraseFromParent();
1902 Changed = true;
1903 ++NumFnDeleted;
1904 } else if (F->hasLocalLinkage()) {
1905 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
1906 !F->hasAddressTaken()) {
1907 // If this function has C calling conventions, is not a varargs
1908 // function, and is only called directly, promote it to use the Fast
1909 // calling convention.
1910 F->setCallingConv(CallingConv::Fast);
1911 ChangeCalleesToFastCall(F);
1912 ++NumFastCallFns;
1913 Changed = true;
1914 }
1916 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
1917 !F->hasAddressTaken()) {
1918 // The function is not used by a trampoline intrinsic, so it is safe
1919 // to remove the 'nest' attribute.
1920 RemoveNestAttribute(F);
1921 ++NumNestRemoved;
1922 Changed = true;
1923 }
1924 }
1925 }
1926 return Changed;
1927 }
1929 bool GlobalOpt::OptimizeGlobalVars(Module &M) {
1930 bool Changed = false;
1931 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
1932 GVI != E; ) {
1933 GlobalVariable *GV = GVI++;
1934 // Global variables without names cannot be referenced outside this module.
1935 if (!GV->hasName() && !GV->isDeclaration())
1936 GV->setLinkage(GlobalValue::InternalLinkage);
1937 // Simplify the initializer.
1938 if (GV->hasInitializer())
1939 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
1940 TargetData *TD = getAnalysisIfAvailable<TargetData>();
1941 TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
1942 Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
1943 if (New && New != CE)
1944 GV->setInitializer(New);
1945 }
1947 Changed |= ProcessGlobal(GV, GVI);
1948 }
1950 return Changed;
1951 }
1952 /// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all
1953 /// initializers have an init priority of 65535.
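/// For reference, a typical list has the shape (illustrative, not verbatim
/// from this file):
///   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
///     [{ i32, void ()* } { i32 65535, void ()* @ctor }]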
1954 GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
1955 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1956 if (GV == 0) return 0;
1958 // Verify that the initializer is simple enough for us to handle. We are
1959 // only allowed to optimize the initializer if it is unique.
1960 if (!GV->hasUniqueInitializer()) return 0;
1962 if (isa<ConstantAggregateZero>(GV->getInitializer()))
1963 return GV;
1964 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
1966 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
1967 if (isa<ConstantAggregateZero>(*i))
1968 continue;
1969 ConstantStruct *CS = cast<ConstantStruct>(*i);
1970 if (isa<ConstantPointerNull>(CS->getOperand(1)))
1971 continue;
1973 // Must have a function or null ptr.
1974 if (!isa<Function>(CS->getOperand(1)))
1975 return 0;
1977 // Init priority must be standard.
1978 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
1979 if (CI->getZExtValue() != 65535)
1980 return 0;
1981 }
1983 return GV;
1984 }
1986 /// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand,
1987 /// return a list of the functions and null terminator as a vector.
1988 static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
1989 if (GV->getInitializer()->isNullValue())
1990 return std::vector<Function*>();
1991 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
1992 std::vector<Function*> Result;
1993 Result.reserve(CA->getNumOperands());
1994 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
1995 ConstantStruct *CS = cast<ConstantStruct>(*i);
1996 Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
1997 }
1999 return Result;
2000 }
2001 /// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
2002 /// specified array, returning the new global to use.
2003 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
2004 const std::vector<Function*> &Ctors) {
2005 // If we made a change, reassemble the initializer list.
2006 Constant *CSVals[2];
2007 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
2008 CSVals[1] = 0;
2010 StructType *StructTy =
2011 cast<StructType>(
2012 cast<ArrayType>(GCL->getType()->getElementType())->getElementType());
2014 // Create the new init list.
2015 std::vector<Constant*> CAList;
2016 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
2017 if (Ctors[i]) {
2018 CSVals[1] = Ctors[i];
2019 } else {
2020 Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
2021 false);
2022 PointerType *PFTy = PointerType::getUnqual(FTy);
2023 CSVals[1] = Constant::getNullValue(PFTy);
2024 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
2025 0x7fffffff);
2026 }
2027 CAList.push_back(ConstantStruct::get(StructTy, CSVals));
2028 }
2030 // Create the array initializer.
2031 Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
2032 CAList.size()), CAList);
2034 // If we didn't change the number of elements, don't create a new GV.
2035 if (CA->getType() == GCL->getInitializer()->getType()) {
2036 GCL->setInitializer(CA);
2037 return GCL;
2038 }
2040 // Create the new global and insert it next to the existing list.
2041 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
2042 GCL->getLinkage(), CA, "",
2043 GCL->isThreadLocal());
2044 GCL->getParent()->getGlobalList().insert(GCL, NGV);
2045 NGV->takeName(GCL);
2047 // Nuke the old list, replacing any uses with the new one.
2048 if (!GCL->use_empty()) {
2049 Constant *V = NGV;
2050 if (V->getType() != GCL->getType())
2051 V = ConstantExpr::getBitCast(V, GCL->getType());
2052 GCL->replaceAllUsesWith(V);
2053 }
2054 GCL->eraseFromParent();
2056 if (Ctors.size())
2057 return NGV;
2058 else
2059 return 0;
2060 }
2063 static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, Value *V) {
2064 if (Constant *CV = dyn_cast<Constant>(V)) return CV;
2065 Constant *R = ComputedValues[V];
2066 assert(R && "Reference to an uncomputed value!");
2067 return R;
2068 }
2070 static bool
2071 isSimpleEnoughValueToCommit(Constant *C,
2072 SmallPtrSet<Constant*, 8> &SimpleConstants);
2075 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be
2076 /// handled by the code generator. We don't want to generate something like:
2077 /// void *X = &X/42;
2078 /// because the code generator doesn't have a relocation that can handle that.
2080 /// This function should be called if C was not found (but just got inserted)
2081 /// in SimpleConstants to avoid having to rescan the same constants all the
2082 /// time.
2083 static bool isSimpleEnoughValueToCommitHelper(Constant *C,
2084 SmallPtrSet<Constant*, 8> &SimpleConstants) {
2085 // Simple integer, undef, constant aggregate zero, global addresses, etc are
2086 // all supported.
2087 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
2088 isa<GlobalValue>(C))
2089 return true;
2091 // Aggregate values are safe if all their elements are.
2092 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
2093 isa<ConstantVector>(C)) {
2094 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
2095 Constant *Op = cast<Constant>(C->getOperand(i));
2096 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants))
2097 return false;
2098 }
2099 return true;
2100 }
2102 // We don't know exactly what relocations are allowed in constant expressions,
2103 // so we allow &global+constantoffset, which is safe and uniformly supported
2104 // across models.
2105 ConstantExpr *CE = cast<ConstantExpr>(C);
2106 switch (CE->getOpcode()) {
2107 case Instruction::BitCast:
2108 case Instruction::IntToPtr:
2109 case Instruction::PtrToInt:
2110 // These casts are always fine if the casted value is.
2111 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
2113 // GEP is fine if it is simple + constant offset.
2114 case Instruction::GetElementPtr:
2115 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
2116 if (!isa<ConstantInt>(CE->getOperand(i)))
2117 return false;
2118 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
2120 case Instruction::Add:
2121 // We allow simple+cst.
2122 if (!isa<ConstantInt>(CE->getOperand(1)))
2123 return false;
2124 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
2125 }
2126 return false;
2127 }
2129 static bool
2130 isSimpleEnoughValueToCommit(Constant *C,
2131 SmallPtrSet<Constant*, 8> &SimpleConstants) {
2132 // If we already checked this constant, we win.
2133 if (!SimpleConstants.insert(C)) return true;
2134 // Check the constant.
2135 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants);
2136 }
2139 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple
2140 /// enough for us to understand. In particular, if it is a cast to anything
2141 /// other than from one pointer type to another pointer type, we punt.
2142 /// We basically just support direct accesses to globals and GEP's of
2143 /// globals. This should be kept up to date with CommitValueTo.
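/// For instance (illustrative only), a plain global @G or a constant
/// "getelementptr inbounds" of @G with a zero first index and in-range integer
/// indices is accepted, while arbitrary casted pointers are rejected.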
2144 static bool isSimpleEnoughPointerToCommit(Constant *C) {
2145 // Conservatively, avoid aggregate types. This is because we don't
2146 // want to worry about them partially overlapping other stores.
2147 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
2148 return false;
2150 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
2151 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2152 // external globals.
2153 return GV->hasUniqueInitializer();
2155 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
2156 // Handle a constantexpr gep.
2157 if (CE->getOpcode() == Instruction::GetElementPtr &&
2158 isa<GlobalVariable>(CE->getOperand(0)) &&
2159 cast<GEPOperator>(CE)->isInBounds()) {
2160 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2161 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2162 // external globals.
2163 if (!GV->hasUniqueInitializer())
2164 return false;
2166 // The first index must be zero.
2167 ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin()));
2168 if (!CI || !CI->isZero()) return false;
2170 // The remaining indices must be compile-time known integers within the
2171 // notional bounds of the corresponding static array types.
2172 if (!CE->isGEPWithNoNotionalOverIndexing())
2173 return false;
2175 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2177 // A constantexpr bitcast from a pointer to another pointer is a no-op,
2178 // and we know how to evaluate it by moving the bitcast from the pointer
2179 // operand to the value operand.
2180 } else if (CE->getOpcode() == Instruction::BitCast &&
2181 isa<GlobalVariable>(CE->getOperand(0))) {
2182 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2183 // external globals.
2184 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
2185 }
2186 }
2188 return false;
2189 }
2191 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global
2192 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it.
2193 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
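/// As an illustrative example, committing a store of 7 through the constant
/// expression "getelementptr (@G, 0, 1)" rebuilds @G's initializer as the same
/// aggregate with element 1 replaced by 7, recursing one GEP operand at a
/// time.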
2194 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2195 ConstantExpr *Addr, unsigned OpNo) {
2196 // Base case of the recursion.
2197 if (OpNo == Addr->getNumOperands()) {
2198 assert(Val->getType() == Init->getType() && "Type mismatch!");
2199 return Val;
2200 }
2202 std::vector<Constant*> Elts;
2203 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2205 // Break up the constant into its elements.
2206 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
2207 for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i)
2208 Elts.push_back(cast<Constant>(*i));
2209 } else if (isa<ConstantAggregateZero>(Init)) {
2210 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2211 Elts.push_back(Constant::getNullValue(STy->getElementType(i)));
2212 } else if (isa<UndefValue>(Init)) {
2213 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2214 Elts.push_back(UndefValue::get(STy->getElementType(i)));
2215 } else {
2216 llvm_unreachable("This code is out of sync with "
2217 " ConstantFoldLoadThroughGEPConstantExpr");
2220 // Replace the element that we are supposed to.
2221 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2222 unsigned Idx = CU->getZExtValue();
2223 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2224 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2226 // Return the modified struct.
2227 return ConstantStruct::get(STy, Elts);
2228 }
2230 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2231 SequentialType *InitTy = cast<SequentialType>(Init->getType());
2233 uint64_t NumElts;
2234 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
2235 NumElts = ATy->getNumElements();
2236 else
2237 NumElts = cast<VectorType>(InitTy)->getNumElements();
2239 // Break up the array into elements.
2240 if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
2241 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
2242 Elts.push_back(cast<Constant>(*i));
2243 } else if (ConstantVector *CV = dyn_cast<ConstantVector>(Init)) {
2244 for (User::op_iterator i = CV->op_begin(), e = CV->op_end(); i != e; ++i)
2245 Elts.push_back(cast<Constant>(*i));
2246 } else if (isa<ConstantAggregateZero>(Init)) {
2247 Elts.assign(NumElts, Constant::getNullValue(InitTy->getElementType()));
2248 } else {
2249 assert(isa<UndefValue>(Init) && "This code is out of sync with "
2250 " ConstantFoldLoadThroughGEPConstantExpr");
2251 Elts.assign(NumElts, UndefValue::get(InitTy->getElementType()));
2252 }
2254 assert(CI->getZExtValue() < NumElts);
2255 Elts[CI->getZExtValue()] =
2256 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2258 if (Init->getType()->isArrayTy())
2259 return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2260 return ConstantVector::get(Elts);
2261 }
2263 /// CommitValueTo - We have decided that Addr (which satisfies the predicate
2264 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2265 static void CommitValueTo(Constant *Val, Constant *Addr) {
2266 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2267 assert(GV->hasInitializer());
2268 GV->setInitializer(Val);
2269 return;
2270 }
2272 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2273 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2274 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2275 }
2277 /// ComputeLoadResult - Return the value that would be computed by a load from
2278 /// P after the stores reflected by 'memory' have been performed. If we can't
2279 /// decide, return null.
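/// For instance (illustrative), a load from @G yields the value most recently
/// recorded for @G in 'memory' if any, and otherwise @G's initializer when
/// that initializer is definitive.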
2280 static Constant *ComputeLoadResult(Constant *P,
2281 const DenseMap<Constant*, Constant*> &Memory) {
2282 // If this memory location has been recently stored, use the stored value: it
2283 // is the most up-to-date.
2284 DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
2285 if (I != Memory.end()) return I->second;
2288 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
2289 if (GV->hasDefinitiveInitializer())
2290 return GV->getInitializer();
2291 return 0;
2292 }
2294 // Handle a constantexpr getelementptr.
2295 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
2296 if (CE->getOpcode() == Instruction::GetElementPtr &&
2297 isa<GlobalVariable>(CE->getOperand(0))) {
2298 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2299 if (GV->hasDefinitiveInitializer())
2300 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2301 }
2303 return 0; // don't know how to evaluate.
2304 }
2306 /// EvaluateFunction - Evaluate a call to function F, returning true if
2307 /// successful, false if we can't evaluate it. ActualArgs contains the actual
2308 /// arguments for the function.
2309 static bool EvaluateFunction(Function *F, Constant *&RetVal,
2310 const SmallVectorImpl<Constant*> &ActualArgs,
2311 std::vector<Function*> &CallStack,
2312 DenseMap<Constant*, Constant*> &MutatedMemory,
2313 std::vector<GlobalVariable*> &AllocaTmps,
2314 SmallPtrSet<Constant*, 8> &SimpleConstants,
2315 const TargetData *TD,
2316 const TargetLibraryInfo *TLI) {
2317 // Check to see if this function is already executing (recursion). If so,
2318 // bail out. TODO: we might want to accept limited recursion.
2319 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
2320 return false;
2322 CallStack.push_back(F);
2324 /// Values - As we compute SSA register values, we store their contents here.
2325 DenseMap<Value*, Constant*> Values;
2327 // Initialize arguments to the incoming values specified.
2328 unsigned ArgNo = 0;
2329 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2330 ++AI, ++ArgNo)
2331 Values[AI] = ActualArgs[ArgNo];
2333 /// ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
2334 /// we can only evaluate any one basic block at most once. This set keeps
2335 /// track of what we have executed so we can detect recursive cases etc.
2336 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
2338 // CurInst - The current instruction we're evaluating.
2339 BasicBlock::iterator CurInst = F->begin()->begin();
2341 // This is the main evaluation loop.
2342 while (1) {
2343 Constant *InstResult = 0;
2345 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
2346 if (!SI->isSimple()) return false; // no volatile/atomic accesses.
2347 Constant *Ptr = getVal(Values, SI->getOperand(1));
2348 if (!isSimpleEnoughPointerToCommit(Ptr))
2349 // If this is too complex for us to commit, reject it.
2350 return false;
2352 Constant *Val = getVal(Values, SI->getOperand(0));
2354 // If this might be too difficult for the backend to handle (e.g. the addr
2355 // of one global variable divided by another) then we can't commit it.
2356 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants))
2357 return false;
2359 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2360 if (CE->getOpcode() == Instruction::BitCast) {
2361 // If we're evaluating a store through a bitcast, then we need
2362 // to pull the bitcast off the pointer type and push it onto the
2363 // stored value.
2364 Ptr = CE->getOperand(0);
2366 Type *NewTy=cast<PointerType>(Ptr->getType())->getElementType();
2368 // In order to push the bitcast onto the stored value, a bitcast
2369 // from NewTy to Val's type must be legal. If it's not, we can try
2370 // introspecting NewTy to find a legal conversion.
2371 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
2372 // If NewTy is a struct, we can convert the pointer to the struct
2373 // into a pointer to its first member.
2374 // FIXME: This could be extended to support arrays as well.
2375 if (StructType *STy = dyn_cast<StructType>(NewTy)) {
2376 NewTy = STy->getTypeAtIndex(0U);
2378 IntegerType *IdxTy =IntegerType::get(NewTy->getContext(), 32);
2379 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
2380 Constant * const IdxList[] = {IdxZero, IdxZero};
2382 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
2384 // If we can't improve the situation by introspecting NewTy,
2385 // we have to give up.
2386 } else {
2387 return false;
2388 }
2389 }
2391 // If we found compatible types, go ahead and push the bitcast
2392 // onto the stored value.
2393 Val = ConstantExpr::getBitCast(Val, NewTy);
2394 }
2396 MutatedMemory[Ptr] = Val;
2397 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
2398 InstResult = ConstantExpr::get(BO->getOpcode(),
2399 getVal(Values, BO->getOperand(0)),
2400 getVal(Values, BO->getOperand(1)));
2401 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
2402 InstResult = ConstantExpr::getCompare(CI->getPredicate(),
2403 getVal(Values, CI->getOperand(0)),
2404 getVal(Values, CI->getOperand(1)));
2405 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
2406 InstResult = ConstantExpr::getCast(CI->getOpcode(),
2407 getVal(Values, CI->getOperand(0)),
2408 CI->getType());
2409 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
2410 InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
2411 getVal(Values, SI->getOperand(1)),
2412 getVal(Values, SI->getOperand(2)));
2413 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
2414 Constant *P = getVal(Values, GEP->getOperand(0));
2415 SmallVector<Constant*, 8> GEPOps;
2416 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
2418 GEPOps.push_back(getVal(Values, *i));
2419 InstResult =
2420 ConstantExpr::getGetElementPtr(P, GEPOps,
2421 cast<GEPOperator>(GEP)->isInBounds());
2422 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
2423 if (!LI->isSimple()) return false; // no volatile/atomic accesses.
2424 InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
2425 MutatedMemory);
2426 if (InstResult == 0) return false; // Could not evaluate load.
2427 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
2428 if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
2429 Type *Ty = AI->getType()->getElementType();
2430 AllocaTmps.push_back(new GlobalVariable(Ty, false,
2431 GlobalValue::InternalLinkage,
2432 UndefValue::get(Ty),
2433 AI->getName()));
2434 InstResult = AllocaTmps.back();
2435 } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {
2437 // Debug info can safely be ignored here.
2438 if (isa<DbgInfoIntrinsic>(CI)) {
2439 ++CurInst;
2440 continue;
2441 }
2443 // Cannot handle inline asm.
2444 if (isa<InlineAsm>(CI->getCalledValue())) return false;
2446 if (MemSetInst *MSI = dyn_cast<MemSetInst>(CI)) {
2447 if (MSI->isVolatile()) return false;
2448 Constant *Ptr = getVal(Values, MSI->getDest());
2449 Constant *Val = getVal(Values, MSI->getValue());
2450 Constant *DestVal = ComputeLoadResult(getVal(Values, Ptr),
2451 MutatedMemory);
2452 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
2453 // This memset is a no-op.
2454 ++CurInst;
2455 continue;
2456 }
2457 return false;
2458 }
2460 // Resolve function pointers.
2461 Function *Callee = dyn_cast<Function>(getVal(Values,
2462 CI->getCalledValue()));
2463 if (!Callee) return false; // Cannot resolve.
2465 SmallVector<Constant*, 8> Formals;
2466 CallSite CS(CI);
2467 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end();
2468 i != e; ++i)
2469 Formals.push_back(getVal(Values, *i));
2471 if (Callee->isDeclaration()) {
2472 // If this is a function we can constant fold, do it.
2473 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
2474 InstResult = C;
2475 } else {
2476 return false;
2477 }
2478 } else {
2479 if (Callee->getFunctionType()->isVarArg())
2480 return false;
2482 Constant *RetVal;
2483 // Execute the call, if successful, use the return value.
2484 if (!EvaluateFunction(Callee, RetVal, Formals, CallStack,
2485 MutatedMemory, AllocaTmps, SimpleConstants, TD,
2486 TLI))
2487 return false;
2488 InstResult = RetVal;
2489 }
2490 } else if (isa<TerminatorInst>(CurInst)) {
2491 BasicBlock *NewBB = 0;
2492 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
2493 if (BI->isUnconditional()) {
2494 NewBB = BI->getSuccessor(0);
2495 } else {
2496 ConstantInt *Cond =
2497 dyn_cast<ConstantInt>(getVal(Values, BI->getCondition()));
2498 if (!Cond) return false; // Cannot determine.
2500 NewBB = BI->getSuccessor(!Cond->getZExtValue());
2501 }
2502 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
2503 ConstantInt *Val =
2504 dyn_cast<ConstantInt>(getVal(Values, SI->getCondition()));
2505 if (!Val) return false; // Cannot determine.
2506 NewBB = SI->getSuccessor(SI->findCaseValue(Val));
2507 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
2508 Value *Val = getVal(Values, IBI->getAddress())->stripPointerCasts();
2509 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
2510 NewBB = BA->getBasicBlock();
2511 else
2512 return false; // Cannot determine.
2513 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
2514 if (RI->getNumOperands())
2515 RetVal = getVal(Values, RI->getOperand(0));
2517 CallStack.pop_back(); // return from fn.
2518 return true; // We succeeded at evaluating this ctor!
2519 } else {
2520 // invoke, unwind, resume, unreachable.
2521 return false; // Cannot handle this terminator.
2522 }
2524 // Okay, we succeeded in evaluating this control flow. See if we have
2525 // executed the new block before. If so, we have a looping function,
2526 // which we cannot evaluate in reasonable time.
2527 if (!ExecutedBlocks.insert(NewBB))
2528 return false; // looped!
2530 // Okay, we have never been in this block before. Check to see if there
2531 // are any PHI nodes. If so, evaluate them with information about where
2532 // we came from.
2533 BasicBlock *OldBB = CurInst->getParent();
2534 CurInst = NewBB->begin();
2535 PHINode *PN;
2536 for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
2537 Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB));
2539 // Do NOT increment CurInst. We know that the terminator had no value.
2540 continue;
2541 } else {
2542 // Did not know how to evaluate this!
2543 return false;
2544 }
2546 if (!CurInst->use_empty()) {
2547 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
2548 InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
2550 Values[CurInst] = InstResult;
2551 }
2553 // Advance program counter.
2554 ++CurInst;
2555 }
2556 }
2558 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if
2559 /// we can. Return true if we can, false otherwise.
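/// For example, a constructor emitted for C++ code like "int g = compute();"
/// can be folded away here when compute() is evaluatable at compile time: the
/// computed value simply becomes @g's initializer.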
2560 static bool EvaluateStaticConstructor(Function *F, const TargetData *TD,
2561 const TargetLibraryInfo *TLI) {
2562 /// MutatedMemory - For each store we execute, we update this map. Loads
2563 /// check this to get the most up-to-date value. If evaluation is successful,
2564 /// this state is committed to the process.
2565 DenseMap<Constant*, Constant*> MutatedMemory;
2567 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
2568 /// to represent its body. This vector is needed so we can delete the
2569 /// temporary globals when we are done.
2570 std::vector<GlobalVariable*> AllocaTmps;
2572 /// CallStack - This is used to detect recursion. In pathological situations
2573 /// we could hit exponential behavior, but at least there is nothing
2574 /// unbounded.
2575 std::vector<Function*> CallStack;
2577 /// SimpleConstants - These are constants we have checked and know to be
2578 /// simple enough to live in a static initializer of a global.
2579 SmallPtrSet<Constant*, 8> SimpleConstants;
2581 // Call the function.
2582 Constant *RetValDummy;
2583 bool EvalSuccess = EvaluateFunction(F, RetValDummy,
2584 SmallVector<Constant*, 0>(), CallStack,
2585 MutatedMemory, AllocaTmps,
2586 SimpleConstants, TD, TLI);
2588 if (EvalSuccess) {
2589 // We succeeded at evaluation: commit the result.
2590 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2591 << F->getName() << "' to " << MutatedMemory.size()
2592 << " stores.\n");
2593 for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
2594 E = MutatedMemory.end(); I != E; ++I)
2595 CommitValueTo(I->second, I->first);
2596 }
2598 // At this point, we are done interpreting. If we created any 'alloca'
2599 // temporaries, release them now.
2600 while (!AllocaTmps.empty()) {
2601 GlobalVariable *Tmp = AllocaTmps.back();
2602 AllocaTmps.pop_back();
2604 // If there are still users of the alloca, the program is doing something
2605 // silly, e.g. storing the address of the alloca somewhere and using it
2606 // later. Since this is undefined, we'll just make it be null.
2607 if (!Tmp->use_empty())
2608 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
2609 delete Tmp;
2610 }
2612 return EvalSuccess;
2613 }
2615 /// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
2616 /// Return true if anything changed.
2617 bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
2618 std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
2619 bool MadeChange = false;
2620 if (Ctors.empty()) return false;
2622 const TargetData *TD = getAnalysisIfAvailable<TargetData>();
2623 const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
2625 // Loop over global ctors, optimizing them when we can.
2626 for (unsigned i = 0; i != Ctors.size(); ++i) {
2627 Function *F = Ctors[i];
2628 // Found a null terminator in the middle of the list, prune off the rest of
2629 // the list.
2630 if (F == 0) {
2631 if (i != Ctors.size()-1) {
2632 Ctors.resize(i+1);
2633 MadeChange = true;
2634 }
2635 break;
2636 }
2638 // We cannot simplify external ctor functions.
2639 if (F->empty()) continue;
2641 // If we can evaluate the ctor at compile time, do.
2642 if (EvaluateStaticConstructor(F, TD, TLI)) {
2643 Ctors.erase(Ctors.begin()+i);
2644 MadeChange = true;
2645 --i;
2646 ++NumCtorsEvaluated;
2647 continue;
2648 }
2649 }
2651 if (!MadeChange) return false;
2653 GCL = InstallGlobalCtors(GCL, Ctors);
2654 return true;
2655 }
2657 bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
2658 bool Changed = false;
2660 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
2661 I != E;) {
2662 Module::alias_iterator J = I++;
2663 // Aliases without names cannot be referenced outside this module.
2664 if (!J->hasName() && !J->isDeclaration())
2665 J->setLinkage(GlobalValue::InternalLinkage);
2666 // If the aliasee may change at link time, nothing can be done - bail out.
2667 if (J->mayBeOverridden())
2668 continue;
2670 Constant *Aliasee = J->getAliasee();
2671 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2672 Target->removeDeadConstantUsers();
2673 bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse();
2675 // Make all users of the alias use the aliasee instead.
2676 if (!J->use_empty()) {
2677 J->replaceAllUsesWith(Aliasee);
2678 ++NumAliasesResolved;
2679 Changed = true;
2680 }
2682 // If the alias is externally visible, we may still be able to simplify it.
2683 if (!J->hasLocalLinkage()) {
2684 // If the aliasee has internal linkage, give it the name and linkage
2685 // of the alias, and delete the alias. This turns:
2686 // define internal ... @f(...)
2687 // @a = alias ... @f
2689 // define ... @a(...)
2690 if (!Target->hasLocalLinkage())
2691 continue;
2693 // Do not perform the transform if multiple aliases potentially target the
2694 // aliasee. This check also ensures that it is safe to replace the section
2695 // and other attributes of the aliasee with those of the alias.
2696 if (!hasOneUse)
2697 continue;
2699 // Give the aliasee the name, linkage and other attributes of the alias.
2700 Target->takeName(J);
2701 Target->setLinkage(J->getLinkage());
2702 Target->GlobalValue::copyAttributesFrom(J);
2703 }
2705 // Delete the alias.
2706 M.getAliasList().erase(J);
2707 ++NumAliasesRemoved;
2708 Changed = true;
2709 }
2711 return Changed;
2712 }
2714 static Function *FindCXAAtExit(Module &M) {
2715 Function *Fn = M.getFunction("__cxa_atexit");
2717 if (!Fn)
2718 return 0;
2720 FunctionType *FTy = Fn->getFunctionType();
2722 // Checking that the function has the right return type, the right number of
2723 // parameters and that they all have pointer types should be enough.
2724 if (!FTy->getReturnType()->isIntegerTy() ||
2725 FTy->getNumParams() != 3 ||
2726 !FTy->getParamType(0)->isPointerTy() ||
2727 !FTy->getParamType(1)->isPointerTy() ||
2728 !FTy->getParamType(2)->isPointerTy())
2729 return 0;
2731 return Fn;
2732 }
2734 /// cxxDtorIsEmpty - Returns whether the given function is an empty C++
2735 /// destructor and can therefore be eliminated.
2736 /// Note that we assume that other optimization passes have already simplified
2737 /// the code so we only look for a function with a single basic block, where
2738 /// the only allowed instructions are 'ret' or 'call' to empty C++ dtor.
2739 static bool cxxDtorIsEmpty(const Function &Fn,
2740 SmallPtrSet<const Function *, 8> &CalledFunctions) {
2741 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
2742 // nounwind, but that doesn't seem worth doing.
2743 if (Fn.isDeclaration())
2744 return false;
2746 if (++Fn.begin() != Fn.end())
2747 return false;
2749 const BasicBlock &EntryBlock = Fn.getEntryBlock();
2750 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
2751 I != E; ++I) {
2752 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2753 // Ignore debug intrinsics.
2754 if (isa<DbgInfoIntrinsic>(CI))
2755 continue;
2757 const Function *CalledFn = CI->getCalledFunction();
2759 if (!CalledFn)
2760 return false;
2762 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
2764 // Don't treat recursive functions as empty.
2765 if (!NewCalledFunctions.insert(CalledFn))
2766 return false;
2768 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
2769 return false;
2770 } else if (isa<ReturnInst>(*I))
2771 return true; // We're done.
2772 else if (I->mayHaveSideEffects())
2773 return false; // Destructor with side effects, bail.
2774 }
2776 return false;
2777 }
2779 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
2780 /// Itanium C++ ABI p3.3.5:
2782 /// After constructing a global (or local static) object, that will require
2783 /// destruction on exit, a termination function is registered as follows:
2785 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
2787 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
2788 /// call f(p) when DSO d is unloaded, before all such termination calls
2789 /// registered before this one. It returns zero if registration is
2790 /// successful, nonzero on failure.
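/// As a rough illustration (identifiers invented), front ends typically emit
/// inside a constructor something like:
///   call i32 @__cxa_atexit(void (i8*)* @dtor.wrapper,
///                          i8* bitcast (%T* @obj to i8*),
///                          i8* @__dso_handle)
/// and this pass removes such a call when the registered destructor is
/// provably empty.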
2792 // This pass will look for calls to __cxa_atexit where the function is trivial
2793 // and remove them.
2794 bool Changed = false;
2796 for (Function::use_iterator I = CXAAtExitFn->use_begin(),
2797 E = CXAAtExitFn->use_end(); I != E;) {
2798 // We're only interested in calls. Theoretically, we could handle invoke
2799 // instructions as well, but neither llvm-gcc nor clang generate invokes
2800 // to __cxa_atexit.
2801 CallInst *CI = dyn_cast<CallInst>(*I++);
2802 if (!CI)
2803 continue;
2805 Function *DtorFn =
2806 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
2807 if (!DtorFn)
2808 continue;
2810 SmallPtrSet<const Function *, 8> CalledFunctions;
2811 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
2812 continue;
2814 // Just remove the call.
2815 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
2816 CI->eraseFromParent();
2818 ++NumCXXDtorsRemoved;
2820 Changed |= true;
2821 }
2823 return Changed;
2824 }
2826 bool GlobalOpt::runOnModule(Module &M) {
2827 bool Changed = false;
2829 // Try to find the llvm.global_ctors list.
2830 GlobalVariable *GlobalCtors = FindGlobalCtors(M);
2832 Function *CXAAtExitFn = FindCXAAtExit(M);
2834 bool LocalChange = true;
2835 while (LocalChange) {
2836 LocalChange = false;
2838 // Delete functions that are trivially dead, ccc -> fastcc
2839 LocalChange |= OptimizeFunctions(M);
2841 // Optimize global_ctors list.
2842 if (GlobalCtors)
2843 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);
2845 // Optimize non-address-taken globals.
2846 LocalChange |= OptimizeGlobalVars(M);
2848 // Resolve aliases, when possible.
2849 LocalChange |= OptimizeGlobalAliases(M);
2851 // Try to remove trivial global destructors.
2852 if (CXAAtExitFn)
2853 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
2855 Changed |= LocalChange;
2856 }
2858 // TODO: Move all global ctors functions to the end of the module for code
2859 // size.
2861 return Changed;
2862 }