1 //===-- DwarfEHPrepare - Prepare exception handling for code generation ---===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass transforms exception handling code into a form adapted to code
11 // generation. Required if using dwarf exception handling.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "dwarfehprepare"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/Dominators.h"
18 #include "llvm/CodeGen/Passes.h"
19 #include "llvm/Function.h"
20 #include "llvm/Instructions.h"
21 #include "llvm/IntrinsicInst.h"
22 #include "llvm/Module.h"
23 #include "llvm/Pass.h"
24 #include "llvm/MC/MCAsmInfo.h"
25 #include "llvm/Target/TargetLowering.h"
26 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
27 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
// Pass-wide statistics counters (reported with -stats). Each one is
// incremented by the corresponding transformation implemented below.
30 STATISTIC(NumLandingPadsSplit, "Number of landing pads split");
31 STATISTIC(NumUnwindsLowered, "Number of unwind instructions lowered");
32 STATISTIC(NumExceptionValuesMoved, "Number of eh.exception calls moved");
33 STATISTIC(NumStackTempsIntroduced, "Number of stack temporaries introduced");
// DwarfEHPrepare - FunctionPass that rewrites exception-handling constructs
// (landing pads, unwind instructions, eh.exception calls) into a form the
// code generator can handle when using dwarf exception handling.
//
// NOTE(review): this is an elided view of the file — the embedded original
// line numbers jump (e.g. 37 -> 40, 48 -> 50), so declarations of several
// members referenced below (CompileFast, DT, F, LandingPads) and the
// access specifiers / closing braces fall on lines not visible here.
36 class DwarfEHPrepare : public FunctionPass {
// Target lowering information, used to find the rewind libcall and its
// calling convention.
37 const TargetLowering *TLI;
40 // The eh.exception intrinsic.
41 Function *ExceptionValueIntrinsic;
43 // _Unwind_Resume or the target equivalent.
44 Constant *RewindFunction;
46 // Dominator info is used when turning stack temporaries into registers.
48 DominanceFrontier *DF;
50 // The function we are running on.
53 // The landing pads for this function.
54 typedef SmallPtrSet<BasicBlock*, 8> BBSet;
57 // Stack temporary used to hold eh.exception values.
58 AllocaInst *ExceptionValueVar;
// The individual transformation phases run (in order) by runOnFunction.
// Each returns true if it changed the IR.
60 bool NormalizeLandingPads();
62 bool MoveExceptionValueCalls();
63 bool FinishStackTemporaries();
64 bool PromoteStackTemporaries();
// Helpers used by CreateReadOfExceptionValue below.
66 Instruction *CreateExceptionValueCall(BasicBlock *BB);
67 Instruction *CreateValueLoad(BasicBlock *BB);
69 /// CreateReadOfExceptionValue - Return the result of the eh.exception
70 /// intrinsic by calling the intrinsic if in a landing pad, or loading
71 /// it from the exception value variable otherwise.
72 Instruction *CreateReadOfExceptionValue(BasicBlock *BB) {
73 return LandingPads.count(BB) ?
74 CreateExceptionValueCall(BB) : CreateValueLoad(BB);
78 static char ID; // Pass identification, replacement for typeid.
// Construct the pass. 'fast' selects fast-compilation mode (stored in
// CompileFast, declared on an elided line); intrinsic/libcall handles are
// lazily initialized to null and filled in on first use.
79 DwarfEHPrepare(const TargetLowering *tli, bool fast) :
80 FunctionPass(&ID), TLI(tli), CompileFast(fast),
81 ExceptionValueIntrinsic(0), RewindFunction(0) {}
83 virtual bool runOnFunction(Function &Fn);
85 // getAnalysisUsage - We need dominance frontiers for memory promotion.
86 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
// Both dominator analyses are kept up to date (via splitBlock in
// NormalizeLandingPads), so they are preserved as well as required.
// NOTE(review): elided lines here presumably make these requirements
// conditional (likely on CompileFast) — confirm against the full file.
88 AU.addRequired<DominatorTree>();
89 AU.addPreserved<DominatorTree>();
91 AU.addRequired<DominanceFrontier>();
92 AU.addPreserved<DominanceFrontier>();
// Human-readable pass name shown in -debug-pass output.
95 const char *getPassName() const {
96 return "Exception handling preparation";
100 } // end anonymous namespace
// Storage for the pass-identification anchor declared in the class.
102 char DwarfEHPrepare::ID = 0;
// createDwarfEHPass - Public factory: the only way external code constructs
// this (anonymous-namespace) pass.
104 FunctionPass *llvm::createDwarfEHPass(const TargetLowering *tli, bool fast) {
105 return new DwarfEHPrepare(tli, fast);
108 /// NormalizeLandingPads - Normalize and discover landing pads, noting them
109 /// in the LandingPads set. A landing pad is normal if the only CFG edges
110 /// that end at it are unwind edges from invoke instructions. If we inlined
111 /// through an invoke we could have a normal branch from the previous
112 /// unwind block through to the landing pad for the original invoke.
113 /// Abnormal landing pads are fixed up by redirecting all unwind edges to
114 /// a new basic block which falls through to the original.
// Returns true if any landing pad was split (i.e. the IR changed).
// NOTE(review): several statements of this body (continue statements,
// loop headers' tails, closing braces) fall on elided lines.
115 bool DwarfEHPrepare::NormalizeLandingPads() {
116 bool Changed = false;
// SjLj exception handling routes unwind edges through a dispatch switch,
// which this pass must tolerate as a pseudo-unwind edge (see below).
118 const MCAsmInfo *MAI = TLI->getTargetMachine().getMCAsmInfo();
119 bool usingSjLjEH = MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
121 for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
122 TerminatorInst *TI = I->getTerminator();
// Only invoke terminators have unwind edges; anything else is skipped
// (the continue is on an elided line).
123 if (!isa<InvokeInst>(TI))
// Successor 1 of an invoke is its unwind destination, i.e. the landing pad.
125 BasicBlock *LPad = TI->getSuccessor(1);
126 // Skip landing pads that have already been normalized.
127 if (LandingPads.count(LPad))
130 // Check that only invoke unwind edges end at the landing pad.
131 bool OnlyUnwoundTo = true;
132 bool SwitchOK = usingSjLjEH;
133 for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad);
135 TerminatorInst *PT = (*PI)->getTerminator();
136 // The SjLj dispatch block uses a switch instruction. This is effectively
137 // an unwind edge, so we can disregard it here. There will only ever
138 // be one dispatch, however, so if there are multiple switches, one
139 // of them truly is a normal edge, not an unwind edge.
140 if (SwitchOK && isa<SwitchInst>(PT)) {
// An edge from a non-invoke, or an invoke's *normal* edge (successor 0),
// means this landing pad also receives ordinary control flow.
144 if (!isa<InvokeInst>(PT) || LPad == PT->getSuccessor(0)) {
145 OnlyUnwoundTo = false;
151 // Only unwind edges lead to the landing pad. Remember the landing pad.
152 LandingPads.insert(LPad);
156 // At least one normal edge ends at the landing pad. Redirect the unwind
157 // edges to a new basic block which falls through into this one.
159 // Create the new basic block.
160 BasicBlock *NewBB = BasicBlock::Create(F->getContext(),
161 LPad->getName() + "_unwind_edge");
163 // Insert it into the function right before the original landing pad.
164 LPad->getParent()->getBasicBlockList().insert(LPad, NewBB);
166 // Redirect unwind edges from the original landing pad to NewBB.
// PI is advanced before mutation so the iterator stays valid while
// setSuccessor rewrites the predecessor list.
167 for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ) {
168 TerminatorInst *PT = (*PI++)->getTerminator();
169 if (isa<InvokeInst>(PT) && PT->getSuccessor(1) == LPad)
170 // Unwind to the new block.
171 PT->setSuccessor(1, NewBB);
174 // If there are any PHI nodes in LPad, we need to update them so that they
175 // merge incoming values from NewBB instead.
176 for (BasicBlock::iterator II = LPad->begin(); isa<PHINode>(II); ++II) {
177 PHINode *PN = cast<PHINode>(II);
// NewBB's predecessors are exactly the unwind edges just redirected.
178 pred_iterator PB = pred_begin(NewBB), PE = pred_end(NewBB);
180 // Check to see if all of the values coming in via unwind edges are the
181 // same. If so, we don't need to create a new PHI node.
182 Value *InVal = PN->getIncomingValueForBlock(*PB);
183 for (pred_iterator PI = PB; PI != PE; ++PI) {
184 if (PI != PB && InVal != PN->getIncomingValueForBlock(*PI)) {
191 // Different unwind edges have different values. Create a new PHI node
// (placed in NewBB; its insertion point is on an elided line).
193 PHINode *NewPN = PHINode::Create(PN->getType(), PN->getName()+".unwind",
195 // Add an entry for each unwind edge, using the value from the old PHI.
196 for (pred_iterator PI = PB; PI != PE; ++PI)
197 NewPN->addIncoming(PN->getIncomingValueForBlock(*PI), *PI);
199 // Now use this new PHI as the common incoming value for NewBB in PN.
// (presumably InVal = NewPN on an elided line — TODO confirm)
203 // Revector exactly one entry in the PHI node to come from NewBB
204 // and delete all other entries that come from unwind edges. If
205 // there are both normal and unwind edges from the same predecessor,
206 // this leaves an entry for the normal edge.
207 for (pred_iterator PI = PB; PI != PE; ++PI)
208 PN->removeIncomingValue(*PI);
209 PN->addIncoming(InVal, NewBB);
212 // Add a fallthrough from NewBB to the original landing pad.
213 BranchInst::Create(LPad, NewBB);
215 // Now update DominatorTree and DominanceFrontier analysis information.
// (guards such as 'if (DT)' / 'if (DF)' appear to sit on elided lines)
217 DT->splitBlock(NewBB);
219 DF->splitBlock(NewBB);
221 // Remember the newly constructed landing pad. The original landing pad
222 // LPad is no longer a landing pad now that all unwind edges have been
223 // revectored to NewBB.
224 LandingPads.insert(NewBB);
225 ++NumLandingPadsSplit;
232 /// LowerUnwinds - Turn unwind instructions into calls to _Unwind_Resume,
233 /// rethrowing any previously caught exception. This will crash horribly
234 /// at runtime if there is no such exception: using unwind to throw a new
235 /// exception is currently not supported.
// Returns true if any unwind instruction was lowered.
// NOTE(review): the declaration of LowerUnwinds in the class, the increment
// of NumUnwindsLowered, and the final return are on elided lines.
236 bool DwarfEHPrepare::LowerUnwinds() {
// First collect all unwind instructions, so the rewrite loop below does
// not mutate blocks while scanning the function.
237 SmallVector<TerminatorInst*, 16> UnwindInsts;
239 for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
240 TerminatorInst *TI = I->getTerminator();
241 if (isa<UnwindInst>(TI))
242 UnwindInsts.push_back(TI);
// Common fast path: no unwind instructions, nothing to do.
245 if (UnwindInsts.empty()) return false;
247 // Find the rewind function if we didn't already.
248 if (!RewindFunction) {
249 LLVMContext &Ctx = UnwindInsts[0]->getContext();
// Signature is void(i8*): the rewind routine takes the exception pointer.
250 std::vector<const Type*>
251 Params(1, Type::getInt8PtrTy(Ctx));
252 FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
// The target chooses the actual symbol (e.g. _Unwind_Resume).
254 const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
255 RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
258 bool Changed = false;
260 for (SmallVectorImpl<TerminatorInst*>::iterator
261 I = UnwindInsts.begin(), E = UnwindInsts.end(); I != E; ++I) {
262 TerminatorInst *TI = *I;
264 // Replace the unwind instruction with a call to _Unwind_Resume (or the
265 // appropriate target equivalent) followed by an UnreachableInst.
267 // Create the call...
// The argument is the current exception value, read via intrinsic or
// stack temporary depending on whether this block is a landing pad.
268 CallInst *CI = CallInst::Create(RewindFunction,
269 CreateReadOfExceptionValue(TI->getParent()),
// Use the calling convention the target specifies for this libcall.
271 CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
272 // ...followed by an UnreachableInst.
273 new UnreachableInst(TI->getContext(), TI);
275 // Nuke the unwind instruction.
276 TI->eraseFromParent();
284 /// MoveExceptionValueCalls - Ensure that eh.exception is only ever called from
285 /// landing pads by replacing calls outside of landing pads with loads from a
286 /// stack temporary. Move eh.exception calls inside landing pads to the start
287 /// of the landing pad (optional, but may make things simpler for later passes).
// Returns true if any eh.exception call was moved or removed.
// NOTE(review): the early return after the fast-path check, the loop over
// dead calls, and the final return are on elided lines.
288 bool DwarfEHPrepare::MoveExceptionValueCalls() {
289 // If the eh.exception intrinsic is not declared in the module then there is
290 // nothing to do. Speed up compilation by checking for this common case.
291 if (!ExceptionValueIntrinsic &&
292 !F->getParent()->getFunction(Intrinsic::getName(Intrinsic::eh_exception)))
295 bool Changed = false;
297 for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
// II is advanced before the cast so erasing CI below does not invalidate
// the iteration.
298 for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
299 if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
300 if (CI->getIntrinsicID() == Intrinsic::eh_exception) {
301 if (!CI->use_empty()) {
// Canonical read: a call at the start of a landing pad, or a load of
// the stack temporary elsewhere.
302 Value *ExceptionValue = CreateReadOfExceptionValue(BB);
303 if (CI == ExceptionValue) {
304 // The call was at the start of a landing pad - leave it alone.
305 assert(LandingPads.count(BB) &&
306 "Created eh.exception call outside landing pad!");
// Otherwise redirect all users to the canonical read and drop this call.
309 CI->replaceAllUsesWith(ExceptionValue);
311 CI->eraseFromParent();
312 ++NumExceptionValuesMoved;
320 /// FinishStackTemporaries - If we introduced a stack variable to hold the
321 /// exception value then initialize it in each landing pad.
// Returns true if any store was inserted. The early 'return false' after
// the null check and the final return are on elided lines.
322 bool DwarfEHPrepare::FinishStackTemporaries() {
// No stack temporary was created, so there is nothing to initialize.
323 if (!ExceptionValueVar)
327 bool Changed = false;
329 // Make sure that there is a store of the exception value at the start of
// each landing pad, so loads of ExceptionValueVar elsewhere see it.
331 for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
333 Instruction *ExceptionValue = CreateReadOfExceptionValue(*LI);
334 Instruction *Store = new StoreInst(ExceptionValue, ExceptionValueVar);
// Place the store immediately after the eh.exception call it captures.
335 Store->insertAfter(ExceptionValue);
342 /// PromoteStackTemporaries - Turn any stack temporaries we introduced into
343 /// registers if possible.
// Returns true if the exception temporary was promoted (the return sits on
// an elided line). Requires both dominator analyses; if either is missing,
// or the alloca is not promotable, the temporary is left on the stack.
344 bool DwarfEHPrepare::PromoteStackTemporaries() {
345 if (ExceptionValueVar && DT && DF && isAllocaPromotable(ExceptionValueVar)) {
346 // Turn the exception temporary into registers and phi nodes if possible.
347 std::vector<AllocaInst*> Allocas(1, ExceptionValueVar);
348 PromoteMemToReg(Allocas, *DT, *DF);
354 /// CreateExceptionValueCall - Insert a call to the eh.exception intrinsic at
355 /// the start of the basic block (unless there already is one, in which case
356 /// the existing call is returned).
// Precondition (per call sites): BB is a landing pad. The 'return CI'
// that reuses an existing call sits on an elided line.
357 Instruction *DwarfEHPrepare::CreateExceptionValueCall(BasicBlock *BB) {
// Skip over any PHI nodes; the call must come first after them.
358 Instruction *Start = BB->getFirstNonPHI();
359 // Is this a call to eh.exception?
360 if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Start))
361 if (CI->getIntrinsicID() == Intrinsic::eh_exception)
362 // Reuse the existing call.
365 // Find the eh.exception intrinsic if we didn't already.
366 if (!ExceptionValueIntrinsic)
367 ExceptionValueIntrinsic = Intrinsic::getDeclaration(F->getParent(),
368 Intrinsic::eh_exception);
// Insert a fresh call before the first non-PHI instruction.
371 return CallInst::Create(ExceptionValueIntrinsic, "eh.value.call", Start);
374 /// CreateValueLoad - Insert a load of the exception value stack variable
375 /// (creating it if necessary) at the start of the basic block (unless
376 /// there already is a load, in which case the existing load is returned).
// Precondition (per call sites): BB is NOT a landing pad. The 'return LI'
// that reuses an existing load sits on an elided line.
377 Instruction *DwarfEHPrepare::CreateValueLoad(BasicBlock *BB) {
378 Instruction *Start = BB->getFirstNonPHI();
379 // Is this a load of the exception temporary?
380 if (ExceptionValueVar)
381 if (LoadInst* LI = dyn_cast<LoadInst>(Start))
382 if (LI->getPointerOperand() == ExceptionValueVar)
383 // Reuse the existing load.
386 // Create the temporary if we didn't already.
387 if (!ExceptionValueVar) {
// An i8* slot in the entry block; FinishStackTemporaries later stores the
// exception value into it in every landing pad.
388 ExceptionValueVar = new AllocaInst(PointerType::getUnqual(
389 Type::getInt8Ty(BB->getContext())), "eh.value", F->begin()->begin());
390 ++NumStackTempsIntroduced;
// Load the temporary at the start of BB (after any PHI nodes).
394 return new LoadInst(ExceptionValueVar, "eh.value.load", Start);
// Main pass entry point: runs the transformation phases in order and
// returns whether the IR changed. NOTE(review): assignments of F and
// clearing of LandingPads, plus the final return, are on elided lines.
397 bool DwarfEHPrepare::runOnFunction(Function &Fn) {
398 bool Changed = false;
400 // Initialize internal state.
// Dominator info is optional ("IfAvailable"); PromoteStackTemporaries
// checks DT/DF for null before using them.
401 DT = getAnalysisIfAvailable<DominatorTree>();
402 DF = getAnalysisIfAvailable<DominanceFrontier>();
// Lazily created by CreateValueLoad; null means no temporary this function.
403 ExceptionValueVar = 0;
406 // Ensure that only unwind edges end at landing pads (a landing pad is a
407 // basic block where an invoke unwind edge ends).
408 Changed |= NormalizeLandingPads();
410 // Turn unwind instructions into libcalls.
411 Changed |= LowerUnwinds();
413 // TODO: Move eh.selector calls to landing pads and combine them.
415 // Move eh.exception calls to landing pads.
416 Changed |= MoveExceptionValueCalls();
418 // Initialize any stack temporaries we introduced.
419 Changed |= FinishStackTemporaries();
421 // Turn any stack temporaries into registers if possible.
// (an elided line likely gates this on !CompileFast — TODO confirm)
423 Changed |= PromoteStackTemporaries();