1 //===- MergeFunctions.cpp - Merge identical functions ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass looks for equivalent functions that are mergeable and folds them.
12 // A hash is computed from the function, based on its type and number of
15 // Once all hashes are computed, we perform an expensive equality comparison
16 // on each function pair. This takes n^2/2 comparisons per bucket, so it's
17 // important that the hash function be high quality. The equality comparison
18 // iterates through each instruction in each basic block.
20 // When a match is found, the functions are folded. We can only fold two
21 // functions when we know that the definition of one of them is not
24 //===----------------------------------------------------------------------===//
28 // * fold vector<T*>::push_back and vector<S*>::push_back.
30 // These two functions have different types, but in a way that doesn't matter
31 // to us. As long as we never see an S or T itself, using S* and S** is the
32 // same as using a T* and T**.
34 // * virtual functions.
36 // Many functions have their address taken by the virtual function table for
37 // the object they belong to. However, as long as it's only used for a lookup
38 // and call, this is irrelevant, and we'd like to fold such implementations.
40 //===----------------------------------------------------------------------===//
42 #define DEBUG_TYPE "mergefunc"
43 #include "llvm/Transforms/IPO.h"
44 #include "llvm/ADT/DenseMap.h"
45 #include "llvm/ADT/FoldingSet.h"
46 #include "llvm/ADT/Statistic.h"
47 #include "llvm/Constants.h"
48 #include "llvm/InlineAsm.h"
49 #include "llvm/Instructions.h"
50 #include "llvm/Module.h"
51 #include "llvm/Pass.h"
52 #include "llvm/Support/CallSite.h"
53 #include "llvm/Support/Compiler.h"
54 #include "llvm/Support/Debug.h"
59 STATISTIC(NumFunctionsMerged, "Number of functions merged");
62 struct VISIBILITY_HIDDEN MergeFunctions : public ModulePass {
63 static char ID; // Pass identification, replacement for typeid
64 MergeFunctions() : ModulePass((intptr_t)&ID) {}
66 bool runOnModule(Module &M);
70 char MergeFunctions::ID = 0;
71 static RegisterPass<MergeFunctions>
72 X("mergefunc", "Merge Functions");
74 ModulePass *llvm::createMergeFunctionsPass() {
75 return new MergeFunctions();
78 // ===----------------------------------------------------------------------===
79 // Comparison of functions
80 // ===----------------------------------------------------------------------===
82 static unsigned long hash(const Function *F) {
83 const FunctionType *FTy = F->getFunctionType();
86 ID.AddInteger(F->size());
87 ID.AddInteger(F->getCallingConv());
88 ID.AddBoolean(F->hasGC());
89 ID.AddBoolean(FTy->isVarArg());
90 ID.AddInteger(FTy->getReturnType()->getTypeID());
91 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
92 ID.AddInteger(FTy->getParamType(i)->getTypeID());
93 return ID.ComputeHash();
96 /// IgnoreBitcasts - given a bitcast, returns the first non-bitcast found by
97 /// walking the chain of cast operands. Otherwise, returns the argument.
98 static Value* IgnoreBitcasts(Value *V) {
99 while (BitCastInst *BC = dyn_cast<BitCastInst>(V))
100 V = BC->getOperand(0);
105 /// isEquivalentType - any two pointers are equivalent. Otherwise, standard
106 /// type equivalence rules apply.
107 static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
110 if (Ty1->getTypeID() != Ty2->getTypeID())
113 switch(Ty1->getTypeID()) {
115 case Type::FloatTyID:
116 case Type::DoubleTyID:
117 case Type::X86_FP80TyID:
118 case Type::FP128TyID:
119 case Type::PPC_FP128TyID:
120 case Type::LabelTyID:
121 case Type::MetadataTyID:
124 case Type::IntegerTyID:
125 case Type::OpaqueTyID:
126 // Ty1 == Ty2 would have returned true earlier.
130 assert(0 && "Unknown type!");
133 case Type::PointerTyID: {
134 const PointerType *PTy1 = cast<PointerType>(Ty1);
135 const PointerType *PTy2 = cast<PointerType>(Ty2);
136 return PTy1->getAddressSpace() == PTy2->getAddressSpace();
139 case Type::StructTyID: {
140 const StructType *STy1 = cast<StructType>(Ty1);
141 const StructType *STy2 = cast<StructType>(Ty2);
142 if (STy1->getNumElements() != STy2->getNumElements())
145 if (STy1->isPacked() != STy2->isPacked())
148 for (unsigned i = 0, e = STy1->getNumElements(); i != e; ++i) {
149 if (!isEquivalentType(STy1->getElementType(i), STy2->getElementType(i)))
155 case Type::FunctionTyID: {
156 const FunctionType *FTy1 = cast<FunctionType>(Ty1);
157 const FunctionType *FTy2 = cast<FunctionType>(Ty2);
158 if (FTy1->getNumParams() != FTy2->getNumParams() ||
159 FTy1->isVarArg() != FTy2->isVarArg())
162 if (!isEquivalentType(FTy1->getReturnType(), FTy2->getReturnType()))
165 for (unsigned i = 0, e = FTy1->getNumParams(); i != e; ++i) {
166 if (!isEquivalentType(FTy1->getParamType(i), FTy2->getParamType(i)))
172 case Type::ArrayTyID:
173 case Type::VectorTyID: {
174 const SequentialType *STy1 = cast<SequentialType>(Ty1);
175 const SequentialType *STy2 = cast<SequentialType>(Ty2);
176 return isEquivalentType(STy1->getElementType(), STy2->getElementType());
181 /// isEquivalentOperation - determine whether the two operations are the same
182 /// except that pointer-to-A and pointer-to-B are equivalent. This should be
183 /// kept in sync with Instruction::isSameOperationAs.
185 isEquivalentOperation(const Instruction *I1, const Instruction *I2) {
186 if (I1->getOpcode() != I2->getOpcode() ||
187 I1->getNumOperands() != I2->getNumOperands() ||
188 !isEquivalentType(I1->getType(), I2->getType()))
191 // We have two instructions of identical opcode and #operands. Check to see
192 // if all operands are the same type
193 for (unsigned i = 0, e = I1->getNumOperands(); i != e; ++i)
194 if (!isEquivalentType(I1->getOperand(i)->getType(),
195 I2->getOperand(i)->getType()))
198 // Check special state that is a part of some instructions.
199 if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
200 return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
201 LI->getAlignment() == cast<LoadInst>(I2)->getAlignment();
202 if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
203 return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
204 SI->getAlignment() == cast<StoreInst>(I2)->getAlignment();
205 if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
206 return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
207 if (const CallInst *CI = dyn_cast<CallInst>(I1))
208 return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
209 CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
210 CI->getAttributes().getRawPointer() ==
211 cast<CallInst>(I2)->getAttributes().getRawPointer();
212 if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
213 return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
214 CI->getAttributes().getRawPointer() ==
215 cast<InvokeInst>(I2)->getAttributes().getRawPointer();
216 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1)) {
217 if (IVI->getNumIndices() != cast<InsertValueInst>(I2)->getNumIndices())
219 for (unsigned i = 0, e = IVI->getNumIndices(); i != e; ++i)
220 if (IVI->idx_begin()[i] != cast<InsertValueInst>(I2)->idx_begin()[i])
224 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1)) {
225 if (EVI->getNumIndices() != cast<ExtractValueInst>(I2)->getNumIndices())
227 for (unsigned i = 0, e = EVI->getNumIndices(); i != e; ++i)
228 if (EVI->idx_begin()[i] != cast<ExtractValueInst>(I2)->idx_begin()[i])
236 static bool compare(const Value *V, const Value *U) {
237 assert(!isa<BasicBlock>(V) && !isa<BasicBlock>(U) &&
238 "Must not compare basic blocks.");
240 assert(isEquivalentType(V->getType(), U->getType()) &&
241 "Two of the same operation have operands of different type.");
243 // TODO: If the constant is an expression of F, we should accept that it's
244 // equal to the same expression in terms of G.
245 if (isa<Constant>(V))
248 // The caller has ensured that ValueMap[V] != U. Since Arguments are
249 // pre-loaded into the ValueMap, and Instructions are added as we go, we know
250 // that this can only be a mis-match.
251 if (isa<Instruction>(V) || isa<Argument>(V))
254 if (isa<InlineAsm>(V) && isa<InlineAsm>(U)) {
255 const InlineAsm *IAF = cast<InlineAsm>(V);
256 const InlineAsm *IAG = cast<InlineAsm>(U);
257 return IAF->getAsmString() == IAG->getAsmString() &&
258 IAF->getConstraintString() == IAG->getConstraintString();
264 static bool equals(const BasicBlock *BB1, const BasicBlock *BB2,
265 DenseMap<const Value *, const Value *> &ValueMap,
266 DenseMap<const Value *, const Value *> &SpeculationMap) {
267 // Speculatively add it anyways. If it's false, we'll notice a difference
268 // later, and this won't matter.
271 BasicBlock::const_iterator FI = BB1->begin(), FE = BB1->end();
272 BasicBlock::const_iterator GI = BB2->begin(), GE = BB2->end();
275 if (isa<BitCastInst>(FI)) {
279 if (isa<BitCastInst>(GI)) {
284 if (!isEquivalentOperation(FI, GI))
287 if (isa<GetElementPtrInst>(FI)) {
288 const GetElementPtrInst *GEPF = cast<GetElementPtrInst>(FI);
289 const GetElementPtrInst *GEPG = cast<GetElementPtrInst>(GI);
290 if (GEPF->hasAllZeroIndices() && GEPG->hasAllZeroIndices()) {
291 // It's effectively a bitcast.
296 // TODO: we only really care about the elements before the index
297 if (FI->getOperand(0)->getType() != GI->getOperand(0)->getType())
301 if (ValueMap[FI] == GI) {
306 if (ValueMap[FI] != NULL)
309 for (unsigned i = 0, e = FI->getNumOperands(); i != e; ++i) {
310 Value *OpF = IgnoreBitcasts(FI->getOperand(i));
311 Value *OpG = IgnoreBitcasts(GI->getOperand(i));
313 if (ValueMap[OpF] == OpG)
316 if (ValueMap[OpF] != NULL)
319 if (OpF->getValueID() != OpG->getValueID() ||
320 !isEquivalentType(OpF->getType(), OpG->getType()))
323 if (isa<PHINode>(FI)) {
324 if (SpeculationMap[OpF] == NULL)
325 SpeculationMap[OpF] = OpG;
326 else if (SpeculationMap[OpF] != OpG)
329 } else if (isa<BasicBlock>(OpF)) {
330 assert(isa<TerminatorInst>(FI) &&
331 "BasicBlock referenced by non-Terminator non-PHI");
332 // This call changes the ValueMap, hence we can't use
333 // Value *& = ValueMap[...]
334 if (!equals(cast<BasicBlock>(OpF), cast<BasicBlock>(OpG), ValueMap,
338 if (!compare(OpF, OpG))
347 } while (FI != FE && GI != GE);
349 return FI == FE && GI == GE;
352 static bool equals(const Function *F, const Function *G) {
353 // We need to recheck everything, but check the things that weren't included
354 // in the hash first.
356 if (F->getAttributes() != G->getAttributes())
359 if (F->hasGC() != G->hasGC())
362 if (F->hasGC() && F->getGC() != G->getGC())
365 if (F->hasSection() != G->hasSection())
368 if (F->hasSection() && F->getSection() != G->getSection())
371 if (F->isVarArg() != G->isVarArg())
374 // TODO: if it's internal and only used in direct calls, we could handle this
376 if (F->getCallingConv() != G->getCallingConv())
379 if (!isEquivalentType(F->getFunctionType(), G->getFunctionType()))
382 DenseMap<const Value *, const Value *> ValueMap;
383 DenseMap<const Value *, const Value *> SpeculationMap;
386 assert(F->arg_size() == G->arg_size() &&
387 "Identical functions have a different number of args.");
389 for (Function::const_arg_iterator fi = F->arg_begin(), gi = G->arg_begin(),
390 fe = F->arg_end(); fi != fe; ++fi, ++gi)
393 if (!equals(&F->getEntryBlock(), &G->getEntryBlock(), ValueMap,
397 for (DenseMap<const Value *, const Value *>::iterator
398 I = SpeculationMap.begin(), E = SpeculationMap.end(); I != E; ++I) {
399 if (ValueMap[I->first] != I->second)
406 // ===----------------------------------------------------------------------===
407 // Folding of functions
408 // ===----------------------------------------------------------------------===
411 // * F is external strong, G is external strong:
412 // turn G into a thunk to F (1)
413 // * F is external strong, G is external weak:
414 // turn G into a thunk to F (1)
415 // * F is external weak, G is external weak:
417 // * F is external strong, G is internal:
418 // address of G taken:
419 // turn G into a thunk to F (1)
420 // address of G not taken:
421 // make G an alias to F (2)
422 // * F is internal, G is external weak
423 // address of F is taken:
424 // turn G into a thunk to F (1)
425 // address of F is not taken:
426 // make G an alias of F (2)
427 // * F is internal, G is internal:
428 // address of F and G are taken:
429 // turn G into a thunk to F (1)
430 // address of G is not taken:
431 // make G an alias to F (2)
433 // an alias requires linkage == (external,local,weak); otherwise fall back to creating a thunk
434 // external means 'externally visible' linkage != (internal,private)
435 // internal means linkage == (internal,private)
436 // weak means linkage mayBeOverridable
437 // being external implies that the address is taken
439 // 1. turn G into a thunk to F
440 // 2. make G an alias to F
442 enum LinkageCategory {
448 static LinkageCategory categorize(const Function *F) {
449 switch (F->getLinkage()) {
450 case GlobalValue::InternalLinkage:
451 case GlobalValue::PrivateLinkage:
454 case GlobalValue::WeakAnyLinkage:
455 case GlobalValue::WeakODRLinkage:
456 case GlobalValue::ExternalWeakLinkage:
459 case GlobalValue::ExternalLinkage:
460 case GlobalValue::AvailableExternallyLinkage:
461 case GlobalValue::LinkOnceAnyLinkage:
462 case GlobalValue::LinkOnceODRLinkage:
463 case GlobalValue::AppendingLinkage:
464 case GlobalValue::DLLImportLinkage:
465 case GlobalValue::DLLExportLinkage:
466 case GlobalValue::GhostLinkage:
467 case GlobalValue::CommonLinkage:
468 return ExternalStrong;
471 assert(0 && "Unknown LinkageType.");
475 static void ThunkGToF(Function *F, Function *G) {
476 Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
478 BasicBlock *BB = BasicBlock::Create("", NewG);
480 std::vector<Value *> Args;
482 const FunctionType *FFTy = F->getFunctionType();
483 for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
485 if (FFTy->getParamType(i) == AI->getType())
488 Value *BCI = new BitCastInst(AI, FFTy->getParamType(i), "", BB);
494 CallInst *CI = CallInst::Create(F, Args.begin(), Args.end(), "", BB);
496 CI->setCallingConv(F->getCallingConv());
497 if (NewG->getReturnType() == Type::VoidTy) {
498 ReturnInst::Create(BB);
499 } else if (CI->getType() != NewG->getReturnType()) {
500 Value *BCI = new BitCastInst(CI, NewG->getReturnType(), "", BB);
501 ReturnInst::Create(BCI, BB);
503 ReturnInst::Create(CI, BB);
506 NewG->copyAttributesFrom(G);
508 G->replaceAllUsesWith(NewG);
509 G->eraseFromParent();
511 // TODO: look at direct callers to G and make them all direct callers to F.
514 static void AliasGToF(Function *F, Function *G) {
515 if (!G->hasExternalLinkage() && !G->hasLocalLinkage() && !G->hasWeakLinkage())
516 return ThunkGToF(F, G);
518 GlobalAlias *GA = new GlobalAlias(
519 G->getType(), G->getLinkage(), "",
520 ConstantExpr::getBitCast(F, G->getType()), G->getParent());
521 F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
523 GA->setVisibility(G->getVisibility());
524 G->replaceAllUsesWith(GA);
525 G->eraseFromParent();
528 static bool fold(std::vector<Function *> &FnVec, unsigned i, unsigned j) {
529 Function *F = FnVec[i];
530 Function *G = FnVec[j];
532 LinkageCategory catF = categorize(F);
533 LinkageCategory catG = categorize(G);
535 if (catF == ExternalWeak || (catF == Internal && catG == ExternalStrong)) {
536 std::swap(FnVec[i], FnVec[j]);
538 std::swap(catF, catG);
549 if (G->hasAddressTaken())
558 assert(catG == ExternalWeak);
560 // Make them both thunks to the same internal function.
561 F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
562 Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
564 H->copyAttributesFrom(F);
566 F->replaceAllUsesWith(H);
571 F->setLinkage(GlobalValue::InternalLinkage);
580 if (F->hasAddressTaken())
586 bool addrTakenF = F->hasAddressTaken();
587 bool addrTakenG = G->hasAddressTaken();
588 if (!addrTakenF && addrTakenG) {
589 std::swap(FnVec[i], FnVec[j]);
591 std::swap(addrTakenF, addrTakenG);
594 if (addrTakenF && addrTakenG) {
605 ++NumFunctionsMerged;
609 // ===----------------------------------------------------------------------===
611 // ===----------------------------------------------------------------------===
613 bool MergeFunctions::runOnModule(Module &M) {
614 bool Changed = false;
616 std::map<unsigned long, std::vector<Function *> > FnMap;
618 for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
619 if (F->isDeclaration() || F->isIntrinsic())
622 FnMap[hash(F)].push_back(F);
625 // TODO: instead of running in a loop, we could also fold functions in
626 // callgraph order. Constructing the CFG probably isn't cheaper than just
627 // running in a loop, unless it happened to already be available.
631 LocalChanged = false;
632 DOUT << "size: " << FnMap.size() << "\n";
633 for (std::map<unsigned long, std::vector<Function *> >::iterator
634 I = FnMap.begin(), E = FnMap.end(); I != E; ++I) {
635 std::vector<Function *> &FnVec = I->second;
636 DOUT << "hash (" << I->first << "): " << FnVec.size() << "\n";
638 for (int i = 0, e = FnVec.size(); i != e; ++i) {
639 for (int j = i + 1; j != e; ++j) {
640 bool isEqual = equals(FnVec[i], FnVec[j]);
642 DOUT << " " << FnVec[i]->getName()
643 << (isEqual ? " == " : " != ")
644 << FnVec[j]->getName() << "\n";
647 if (fold(FnVec, i, j)) {
649 FnVec.erase(FnVec.begin() + j);
657 Changed |= LocalChanged;
658 } while (LocalChanged);