1 //===----- JITDwarfEmitter.cpp - Write dwarf tables into memory -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines a JITDwarfEmitter object that is used by the JIT to
11 // write dwarf tables to memory.
13 //===----------------------------------------------------------------------===//
16 #include "JITDwarfEmitter.h"
17 #include "llvm/Function.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/CodeGen/MachineCodeEmitter.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineLocation.h"
22 #include "llvm/CodeGen/MachineModuleInfo.h"
23 #include "llvm/ExecutionEngine/JITMemoryManager.h"
24 #include "llvm/Target/TargetAsmInfo.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetInstrInfo.h"
27 #include "llvm/Target/TargetFrameInfo.h"
28 #include "llvm/Target/TargetMachine.h"
29 #include "llvm/Target/TargetRegisterInfo.h"
// Construct a JITDwarfEmitter bound to the JIT instance whose emitted code
// it will describe; only stashes the reference, no emission happens here.
33 JITDwarfEmitter::JITDwarfEmitter(JIT& theJit) : Jit(theJit) {}
// EmitDwarfTable - Emit the dwarf EH tables for one function into JIT
// memory: first the exception (LSDA) table, then the common EH frame (CIE)
// for the function's personality, then the per-function EH frame (FDE).
// NOTE(review): this listing is non-contiguous (embedded line numbers skip);
// the tail of this function (remaining EmitExceptionTable arguments, return
// statement and closing brace) is missing from view.
36 unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
37 MachineCodeEmitter& mce,
38 unsigned char* StartFunction,
39 unsigned char* EndFunction) {
40 const TargetMachine& TM = F.getTarget();
// Cache the target hooks that the emission helpers below rely on.
41 TD = TM.getTargetData();
42 needsIndirectEncoding = TM.getTargetAsmInfo()->getNeedsIndirectEncoding();
43 stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
44 RI = TM.getRegisterInfo();
// Emit the LSDA first so the FDE can reference it (argument list truncated
// in this listing).
47 unsigned char* ExceptionTable = EmitExceptionTable(&F, StartFunction,
50 unsigned char* Result = 0;
51 unsigned char* EHFramePtr = 0;
// CIE for this function's personality, then the FDE pointing back at it.
53 const std::vector<Function *> Personalities = MMI->getPersonalities();
54 EHFramePtr = EmitCommonEHFrame(Personalities[MMI->getPersonalityIndex()]);
56 Result = EmitEHFrame(Personalities[MMI->getPersonalityIndex()], EHFramePtr,
57 StartFunction, EndFunction, ExceptionTable);
// EmitFrameMoves - Translate the MachineMove list into DWARF call-frame
// instructions (DW_CFA_*) written through MCE, emitting an advance-loc row
// whenever the label (i.e. code address) changes.
// NOTE(review): non-contiguous listing — the return-type line, `IsLocal`
// declaration, several `} else {` lines and closing braces are missing.
64 JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr,
65 const std::vector<MachineMove> &Moves) const {
66 unsigned PointerSize = TD->getPointerSize();
67 int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
68 PointerSize : -PointerSize;
70 unsigned BaseLabelID = 0;
72 for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
73 const MachineMove &Move = Moves[i];
74 unsigned LabelID = Move.getLabelID();
// Map to the final label id; a mapped id of 0 means the label was deleted.
77 LabelID = MMI->MappedLabel(LabelID);
79 // Throw out move if the label is invalid.
80 if (!LabelID) continue;
83 intptr_t LabelPtr = 0;
84 if (LabelID) LabelPtr = MCE->getLabelAddress(LabelID);
86 const MachineLocation &Dst = Move.getDestination();
87 const MachineLocation &Src = Move.getSource();
89 // Advance row if new location.
// NOTE(review): `IsLocal` is used here but its declaration is not in view —
// presumably `bool IsLocal = BaseLabelPtr;` as in GetFrameMovesSizeInBytes
// below; confirm against upstream. Also note the sibling size routine keys
// on pointers (BaseLabelPtr != LabelPtr) rather than ids — verify the two
// stay in sync.
90 if (BaseLabelPtr && LabelID && (BaseLabelID != LabelID || !IsLocal)) {
91 MCE->emitByte(dwarf::DW_CFA_advance_loc4);
92 if (PointerSize == 8) {
93 MCE->emitInt64(LabelPtr - BaseLabelPtr);
// (32-bit else-branch header missing from this listing)
95 MCE->emitInt32(LabelPtr - BaseLabelPtr);
98 BaseLabelID = LabelID;
99 BaseLabelPtr = LabelPtr;
// Translate the move: CFA definition when the destination is the virtual
// frame pointer, register save otherwise.
104 if (Dst.isRegister() && Dst.getRegister() == MachineLocation::VirtualFP) {
105 if (!Src.isRegister()) {
106 if (Src.getRegister() == MachineLocation::VirtualFP) {
107 MCE->emitByte(dwarf::DW_CFA_def_cfa_offset);
109 MCE->emitByte(dwarf::DW_CFA_def_cfa);
110 MCE->emitULEB128Bytes(RI->getDwarfRegNum(Src.getRegister(), true));
113 int Offset = -Src.getOffset();
115 MCE->emitULEB128Bytes(Offset);
117 assert(0 && "Machine move no supported yet.");
119 } else if (Src.isRegister() &&
120 Src.getRegister() == MachineLocation::VirtualFP) {
121 if (Dst.isRegister()) {
122 MCE->emitByte(dwarf::DW_CFA_def_cfa_register);
123 MCE->emitULEB128Bytes(RI->getDwarfRegNum(Dst.getRegister(), true));
125 assert(0 && "Machine move no supported yet.");
// Register saved at an offset from the CFA: pick the most compact
// DW_CFA_offset* encoding based on the offset's sign and register number.
128 unsigned Reg = RI->getDwarfRegNum(Src.getRegister(), true);
129 int Offset = Dst.getOffset() / stackGrowth;
132 MCE->emitByte(dwarf::DW_CFA_offset_extended_sf);
133 MCE->emitULEB128Bytes(Reg);
134 MCE->emitSLEB128Bytes(Offset);
135 } else if (Reg < 64) {
136 MCE->emitByte(dwarf::DW_CFA_offset + Reg);
137 MCE->emitULEB128Bytes(Offset);
139 MCE->emitByte(dwarf::DW_CFA_offset_extended);
140 MCE->emitULEB128Bytes(Reg);
141 MCE->emitULEB128Bytes(Offset);
147 /// SharedTypeIds - How many leading type ids two landing pads have in common.
148 static unsigned SharedTypeIds(const LandingPadInfo *L,
149 const LandingPadInfo *R) {
150 const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
151 unsigned LSize = LIds.size(), RSize = RIds.size();
152 unsigned MinSize = LSize < RSize ? LSize : RSize;
155 for (; Count != MinSize; ++Count)
156 if (LIds[Count] != RIds[Count])
163 /// PadLT - Order landing pads lexicographically by type id.
164 static bool PadLT(const LandingPadInfo *L, const LandingPadInfo *R) {
165 const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
166 unsigned LSize = LIds.size(), RSize = RIds.size();
167 unsigned MinSize = LSize < RSize ? LSize : RSize;
169 for (unsigned i = 0; i != MinSize; ++i)
170 if (LIds[i] != RIds[i])
171 return LIds[i] < RIds[i];
173 return LSize < RSize;
// DenseMap key-trait methods for unsigned label ids (the enclosing struct's
// header line is missing from this listing): two reserved sentinel keys,
// identity hashing, plain value equality, and a POD marker so DenseMap may
// memcpy entries.
179 static inline unsigned getEmptyKey() { return -1U; }
180 static inline unsigned getTombstoneKey() { return -2U; }
181 static unsigned getHashValue(const unsigned &Key) { return Key; }
182 static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; }
183 static bool isPod() { return true; }
186 /// ActionEntry - Structure describing an entry in the actions table.
// NOTE(review): the struct header and the `NextAction` field (referenced as
// PrevAction->NextAction elsewhere in this file) are missing from this
// listing — original numbering skips lines 187 and 189.
188 int ValueForTypeID; // The value to write - may not be equal to the type id.
190 struct ActionEntry *Previous;
193 /// PadRange - Structure holding a try-range and the associated landing pad.
// NOTE(review): the struct header and the field declarations themselves
// (PadIndex / RangeIndex, used as P.PadIndex and P.RangeIndex below) are
// missing from this listing; only the field comments survive.
195 // The index of the landing pad.
197 // The index of the begin and end labels in the landing pad's label lists.
// Maps a begin-label id to the landing-pad range it opens.
201 typedef DenseMap<unsigned, PadRange, KeyInfo> RangeMapType;
203 /// CallSiteEntry - Structure describing an entry in the call-site table.
204 struct CallSiteEntry {
205 unsigned BeginLabel; // zero indicates the start of the function.
206 unsigned EndLabel; // zero indicates the end of the function.
207 unsigned PadLabel; // zero indicates that there is no landing pad.
// NOTE(review): the `Action` field (first action index, referenced as
// S.Action / Site.Action below) and the closing brace are missing from
// this listing.
// EmitExceptionTable - Emit the function's LSDA (language-specific data
// area) into JIT memory and return its start address: header, call-site
// table, actions table, type-info pointers and filter type ids, in the
// gcc_except_table layout.
// NOTE(review): this listing is non-contiguous — several declarations
// (Offset, FirstAction, PadMap, various else/closing-brace lines) are
// missing; control flow below is incomplete as shown.
213 unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
214 unsigned char* StartFunction,
215 unsigned char* EndFunction) const {
216 // Map all labels and get rid of any dead landing pads.
217 MMI->TidyLandingPads();
219 const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
220 const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
221 const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
// No landing pads: nothing to emit.
222 if (PadInfos.empty()) return 0;
224 // Sort the landing pads in order of their type ids. This is used to fold
225 // duplicate actions.
226 SmallVector<const LandingPadInfo *, 64> LandingPads;
227 LandingPads.reserve(PadInfos.size());
228 for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
229 LandingPads.push_back(&PadInfos[i]);
230 std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
232 // Negative type ids index into FilterIds, positive type ids index into
233 // TypeInfos. The value written for a positive type id is just the type
234 // id itself. For a negative type id, however, the value written is the
235 // (negative) byte offset of the corresponding FilterIds entry. The byte
236 // offset is usually equal to the type id, because the FilterIds entries
237 // are written using a variable width encoding which outputs one byte per
238 // entry as long as the value written is not too large, but can differ.
239 // This kind of complication does not occur for positive type ids because
240 // type infos are output using a fixed width encoding.
241 // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
242 SmallVector<int, 16> FilterOffsets;
243 FilterOffsets.reserve(FilterIds.size());
// NOTE(review): the declaration of `Offset` (running negative byte offset)
// is on a line missing from this listing.
245 for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
246 E = FilterIds.end(); I != E; ++I) {
247 FilterOffsets.push_back(Offset);
248 Offset -= TargetAsmInfo::getULEB128Size(*I);
251 // Compute the actions table and gather the first action index for each
253 SmallVector<ActionEntry, 32> Actions;
254 SmallVector<unsigned, 64> FirstActions;
255 FirstActions.reserve(LandingPads.size());
258 unsigned SizeActions = 0;
259 for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
260 const LandingPadInfo *LP = LandingPads[i];
261 const std::vector<int> &TypeIds = LP->TypeIds;
// Pads are sorted, so only the suffix beyond the shared prefix with the
// previous pad needs new action entries.
262 const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
263 unsigned SizeSiteActions = 0;
265 if (NumShared < TypeIds.size()) {
266 unsigned SizeAction = 0;
267 ActionEntry *PrevAction = 0;
// Rewind through the previous pad's action chain to the shared prefix.
270 const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
271 assert(Actions.size());
272 PrevAction = &Actions.back();
273 SizeAction = TargetAsmInfo::getSLEB128Size(PrevAction->NextAction) +
274 TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
275 for (unsigned j = NumShared; j != SizePrevIds; ++j) {
276 SizeAction -= TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
277 SizeAction += -PrevAction->NextAction;
278 PrevAction = PrevAction->Previous;
282 // Compute the actions.
283 for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
284 int TypeID = TypeIds[I];
285 assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
286 int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
287 unsigned SizeTypeID = TargetAsmInfo::getSLEB128Size(ValueForTypeID);
// NextAction is a negative self-relative byte offset (0 ends the chain).
289 int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
290 SizeAction = SizeTypeID + TargetAsmInfo::getSLEB128Size(NextAction);
291 SizeSiteActions += SizeAction;
293 ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
294 Actions.push_back(Action);
296 PrevAction = &Actions.back();
299 // Record the first action of the landing pad site.
// NOTE(review): `FirstAction` is declared on a line missing from this view.
300 FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
301 } // else identical - re-use previous FirstAction
303 FirstActions.push_back(FirstAction);
305 // Compute this sites contribution to size.
306 SizeActions += SizeSiteActions;
309 // Compute the call-site table. Entries must be ordered by address.
310 SmallVector<CallSiteEntry, 64> CallSites;
// NOTE(review): the `RangeMapType PadMap;` declaration is missing from
// this listing.
313 for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
314 const LandingPadInfo *LandingPad = LandingPads[i];
315 for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
316 unsigned BeginLabel = LandingPad->BeginLabels[j];
317 assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
318 PadRange P = { i, j };
319 PadMap[BeginLabel] = P;
// Walk the machine code, turning label-delimited try-ranges into call-site
// entries (and gap entries where intervening code may throw).
323 bool MayThrow = false;
324 unsigned LastLabel = 0;
325 for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
327 for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
329 if (!MI->isLabel()) {
330 MayThrow |= MI->getDesc().isCall();
334 unsigned BeginLabel = MI->getOperand(0).getImm();
335 assert(BeginLabel && "Invalid label!");
337 if (BeginLabel == LastLabel)
340 RangeMapType::iterator L = PadMap.find(BeginLabel);
342 if (L == PadMap.end())
345 PadRange P = L->second;
346 const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
348 assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
349 "Inconsistent landing pad map!");
351 // If some instruction between the previous try-range and this one may
352 // throw, create a call-site entry with no landing pad for the region
353 // between the try-ranges.
355 CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
356 CallSites.push_back(Site);
359 LastLabel = LandingPad->EndLabels[P.RangeIndex];
360 CallSiteEntry Site = {BeginLabel, LastLabel,
361 LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
363 assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
364 "Invalid landing pad!");
366 // Try to merge with the previous call-site.
367 if (CallSites.size()) {
368 CallSiteEntry &Prev = CallSites.back();
369 if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
370 // Extend the range of the previous entry.
371 Prev.EndLabel = Site.EndLabel;
376 // Otherwise, create a new call-site.
377 CallSites.push_back(Site);
380 // If some instruction between the previous try-range and the end of the
381 // function may throw, create a call-site entry with no landing pad for the
382 // region following the try-range.
384 CallSiteEntry Site = {LastLabel, 0, 0, 0};
385 CallSites.push_back(Site);
// Final sizing: three udata4 fields per call site plus the ULEB action
// index, then the fixed-width type-info block.
389 unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
390 sizeof(int32_t) + // Site length.
391 sizeof(int32_t)); // Landing pad.
392 for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
393 SizeSites += TargetAsmInfo::getULEB128Size(CallSites[i].Action);
395 unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
397 unsigned TypeOffset = sizeof(int8_t) + // Call site format
398 // Call-site table length
399 TargetAsmInfo::getULEB128Size(SizeSites) +
400 SizeSites + SizeActions + SizeTypes;
402 unsigned TotalSize = sizeof(int8_t) + // LPStart format
403 sizeof(int8_t) + // TType format
404 TargetAsmInfo::getULEB128Size(TypeOffset) + // TType base offset
407 unsigned SizeAlign = (4 - TotalSize) & 3;
409 // Begin the exception table.
410 MCE->emitAlignment(4);
411 for (unsigned i = 0; i != SizeAlign; ++i) {
413 // Asm->EOL("Padding");
416 unsigned char* DwarfExceptionTable = (unsigned char*)MCE->getCurrentPCValue();
// Emit the header.
419 MCE->emitByte(dwarf::DW_EH_PE_omit);
420 // Asm->EOL("LPStart format (DW_EH_PE_omit)");
421 MCE->emitByte(dwarf::DW_EH_PE_absptr);
422 // Asm->EOL("TType format (DW_EH_PE_absptr)");
423 MCE->emitULEB128Bytes(TypeOffset);
424 // Asm->EOL("TType base offset");
425 MCE->emitByte(dwarf::DW_EH_PE_udata4);
426 // Asm->EOL("Call site format (DW_EH_PE_udata4)");
427 MCE->emitULEB128Bytes(SizeSites);
428 // Asm->EOL("Call-site table length");
430 // Emit the landing pad site information.
// NOTE(review): in the loop below several if/else headers and braces are
// missing from this listing; the intact branches show region start, region
// length and landing pad each emitted as Start-relative 32/64-bit offsets.
431 for (unsigned i = 0; i < CallSites.size(); ++i) {
432 CallSiteEntry &S = CallSites[i];
433 intptr_t BeginLabelPtr = 0;
434 intptr_t EndLabelPtr = 0;
437 BeginLabelPtr = (intptr_t)StartFunction;
438 if (TD->getPointerSize() == sizeof(int32_t))
443 BeginLabelPtr = MCE->getLabelAddress(S.BeginLabel);
444 if (TD->getPointerSize() == sizeof(int32_t))
445 MCE->emitInt32(BeginLabelPtr - (intptr_t)StartFunction);
447 MCE->emitInt64(BeginLabelPtr - (intptr_t)StartFunction);
450 // Asm->EOL("Region start");
453 EndLabelPtr = (intptr_t)EndFunction;
454 if (TD->getPointerSize() == sizeof(int32_t))
455 MCE->emitInt32((intptr_t)EndFunction - BeginLabelPtr);
457 MCE->emitInt64((intptr_t)EndFunction - BeginLabelPtr);
459 EndLabelPtr = MCE->getLabelAddress(S.EndLabel);
460 if (TD->getPointerSize() == sizeof(int32_t))
461 MCE->emitInt32(EndLabelPtr - BeginLabelPtr);
463 MCE->emitInt64(EndLabelPtr - BeginLabelPtr);
465 //Asm->EOL("Region length");
468 if (TD->getPointerSize() == sizeof(int32_t))
473 unsigned PadLabelPtr = MCE->getLabelAddress(S.PadLabel);
474 if (TD->getPointerSize() == sizeof(int32_t))
475 MCE->emitInt32(PadLabelPtr - (intptr_t)StartFunction);
477 MCE->emitInt64(PadLabelPtr - (intptr_t)StartFunction);
479 // Asm->EOL("Landing pad");
481 MCE->emitULEB128Bytes(S.Action);
482 // Asm->EOL("Action");
// Emit the actions table: (type id value, next-action link) pairs.
486 for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
487 ActionEntry &Action = Actions[I];
489 MCE->emitSLEB128Bytes(Action.ValueForTypeID);
490 //Asm->EOL("TypeInfo index");
491 MCE->emitSLEB128Bytes(Action.NextAction);
492 //Asm->EOL("Next action");
495 // Emit the type ids.
496 for (unsigned M = TypeInfos.size(); M; --M) {
497 GlobalVariable *GV = TypeInfos[M - 1];
// Pointers are emitted in reverse order (TType entries are indexed
// negatively from the TType base).
500 if (TD->getPointerSize() == sizeof(int32_t)) {
501 MCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
503 MCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
506 if (TD->getPointerSize() == sizeof(int32_t))
511 // Asm->EOL("TypeInfo");
514 // Emit the filter typeids.
515 for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
516 unsigned TypeID = FilterIds[j];
517 MCE->emitULEB128Bytes(TypeID);
518 //Asm->EOL("Filter TypeInfo index");
521 MCE->emitAlignment(4);
523 return DwarfExceptionTable;
// EmitCommonEHFrame - Emit the CIE (common frame information) for the given
// personality (or the personality-less "zR" variant) into JIT memory and
// return its start address. The leading allocateSpace slot is back-patched
// with the CIE length via emitAt at the end.
// NOTE(review): non-contiguous listing — the return-type line, some guard
// conditions (e.g. around the personality emission) and closing braces are
// missing from view.
527 JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
528 unsigned PointerSize = TD->getPointerSize();
529 int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
530 PointerSize : -PointerSize;
532 unsigned char* StartCommonPtr = (unsigned char*)MCE->getCurrentPCValue();
533 // EH Common Frame header
534 MCE->allocateSpace(PointerSize, 0);
535 unsigned char* FrameCommonBeginPtr = (unsigned char*)MCE->getCurrentPCValue();
// CIE id 0, version, augmentation string, code/data alignment, RA column.
536 MCE->emitInt32((int)0);
537 MCE->emitByte(dwarf::DW_CIE_VERSION);
538 MCE->emitString(Personality ? "zPLR" : "zR");
539 MCE->emitULEB128Bytes(1);
540 MCE->emitSLEB128Bytes(stackGrowth);
541 MCE->emitByte(RI->getDwarfRegNum(RI->getRARegister(), true));
// Augmentation data length, then the personality encoding + pointer.
544 MCE->emitULEB128Bytes(7);
546 // Direct encoding, because we use the function pointer.
547 MCE->emitByte(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
549 if (PointerSize == 8)
550 MCE->emitInt64((intptr_t)Jit.getPointerToGlobal(Personality) -
551 MCE->getCurrentPCValue());
553 MCE->emitInt32((intptr_t)Jit.getPointerToGlobal(Personality) -
554 MCE->getCurrentPCValue());
// LSDA and FDE pointer encodings for the "zPLR" augmentation...
556 MCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
557 MCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
// ...or just the FDE encoding for the personality-less "zR" variant.
560 MCE->emitULEB128Bytes(1);
561 MCE->emitULEB128Bytes(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4);
// Initial CFA state for the target, then pad and back-patch the length.
564 std::vector<MachineMove> Moves;
565 RI->getInitialFrameState(Moves);
566 EmitFrameMoves(0, Moves);
567 MCE->emitAlignment(4);
569 MCE->emitAt((uintptr_t*)StartCommonPtr,
570 (uintptr_t)((unsigned char*)MCE->getCurrentPCValue() -
571 FrameCommonBeginPtr));
573 return StartCommonPtr;
// EmitEHFrame - Emit the FDE (frame description entry) for one function:
// back-pointer to its CIE, pc-relative code range, LSDA reference, and the
// function's frame moves. Returns are not visible here — the tail of the
// function (terminating zero words, return, brace) is missing from this
// non-contiguous listing, as is the return-type line.
578 JITDwarfEmitter::EmitEHFrame(const Function* Personality,
579 unsigned char* StartCommonPtr,
580 unsigned char* StartFunction,
581 unsigned char* EndFunction,
582 unsigned char* ExceptionTable) const {
583 unsigned PointerSize = TD->getPointerSize();
// Reserve the length slot; it is back-patched via emitAt below.
586 unsigned char* StartEHPtr = (unsigned char*)MCE->getCurrentPCValue();
587 MCE->allocateSpace(PointerSize, 0);
588 unsigned char* FrameBeginPtr = (unsigned char*)MCE->getCurrentPCValue();
// CIE back-offset, pc-relative function start, and code-range length.
590 if (PointerSize == 8) {
591 MCE->emitInt64(FrameBeginPtr - StartCommonPtr);
592 MCE->emitInt64(StartFunction - (unsigned char*)MCE->getCurrentPCValue());
593 MCE->emitInt64(EndFunction - StartFunction);
595 MCE->emitInt32(FrameBeginPtr - StartCommonPtr);
596 MCE->emitInt32(StartFunction - (unsigned char*)MCE->getCurrentPCValue());
597 MCE->emitInt32(EndFunction - StartFunction);
600 // If there is a personality and landing pads then point to the language
601 // specific data area in the exception table.
602 if (MMI->getPersonalityIndex()) {
// Augmentation data: 4-byte LSDA pointer (pc-relative), or zero if there
// are no landing pads.
603 MCE->emitULEB128Bytes(4);
605 if (!MMI->getLandingPads().empty()) {
606 if (PointerSize == 8)
607 MCE->emitInt64(ExceptionTable - (unsigned char*)MCE->getCurrentPCValue());
609 MCE->emitInt32(ExceptionTable - (unsigned char*)MCE->getCurrentPCValue());
610 } else if (PointerSize == 8) {
611 MCE->emitInt64((int)0);
613 MCE->emitInt32((int)0);
// No personality: empty augmentation data.
616 MCE->emitULEB128Bytes(0);
619 // Indicate locations of function specific callee saved registers in
621 EmitFrameMoves((intptr_t)StartFunction, MMI->getFrameMoves());
623 MCE->emitAlignment(4);
625 // Indicate the size of the table
626 MCE->emitAt((uintptr_t*)StartEHPtr,
627 (uintptr_t)((unsigned char*)MCE->getCurrentPCValue() -
630 // Double zeroes for the unwind runtime
631 if (PointerSize == 8) {
// GetDwarfTableSizeInBytes - Conservative upper bound on the bytes
// EmitDwarfTable will write for this function: exception table + common EH
// frame + per-function EH frame. Mirrors EmitDwarfTable's setup of the
// cached target hooks.
// NOTE(review): the return statement and closing brace are missing from
// this non-contiguous listing.
643 unsigned JITDwarfEmitter::GetDwarfTableSizeInBytes(MachineFunction& F,
644 MachineCodeEmitter& mce,
645 unsigned char* StartFunction,
646 unsigned char* EndFunction) {
647 const TargetMachine& TM = F.getTarget();
648 TD = TM.getTargetData();
649 needsIndirectEncoding = TM.getTargetAsmInfo()->getNeedsIndirectEncoding();
650 stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
651 RI = TM.getRegisterInfo();
653 unsigned FinalSize = 0;
655 FinalSize += GetExceptionTableSizeInBytes(&F);
657 const std::vector<Function *> Personalities = MMI->getPersonalities();
658 FinalSize += GetCommonEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()]);
660 FinalSize += GetEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()], StartFunction);
/// RoundUpToAlign - Add the specified alignment to FinalSize and return it,
/// so the size estimate is pessimistically safe for any buffer placement.
///
/// \param FinalSize  running size estimate in bytes.
/// \param Alignment  requested alignment; 0 is treated as 1 (no alignment).
/// \return FinalSize plus a full Alignment of padding.
///
/// NOTE(review): the original listing dropped the closing brace; restored.
static unsigned RoundUpToAlign(unsigned FinalSize, unsigned Alignment) {
  if (Alignment == 0) Alignment = 1;
  // Since we do not know where the buffer will be allocated, be pessimistic
  // and assume a full extra Alignment bytes of padding may be needed.
  return FinalSize + Alignment;
}
// GetEHFrameSizeInBytes - Size estimate matching EmitEHFrame: length slot,
// CIE offset + pc range, augmentation data, frame moves, padding, and the
// trailing double-zero words.
// NOTE(review): the return-type line, return statement and closing brace
// are missing from this non-contiguous listing.
674 JITDwarfEmitter::GetEHFrameSizeInBytes(const Function* Personality,
675 unsigned char* StartFunction) const {
676 unsigned PointerSize = TD->getPointerSize();
677 unsigned FinalSize = 0;
// Length slot, then CIE offset / function start / range length.
679 FinalSize += PointerSize;
681 FinalSize += 3 * PointerSize;
682 // If there is a personality and landing pads then point to the language
683 // specific data area in the exception table.
684 if (MMI->getPersonalityIndex()) {
685 FinalSize += TargetAsmInfo::getULEB128Size(4);
686 FinalSize += PointerSize;
688 FinalSize += TargetAsmInfo::getULEB128Size(0);
691 // Indicate locations of function specific callee saved registers in
693 FinalSize += GetFrameMovesSizeInBytes((intptr_t)StartFunction,
694 MMI->getFrameMoves());
696 FinalSize = RoundUpToAlign(FinalSize, 4);
698 // Double zeroes for the unwind runtime
699 FinalSize += 2 * PointerSize;
// GetCommonEHFrameSizeInBytes - Size estimate matching EmitCommonEHFrame's
// CIE layout (header, augmentation, personality pointer, encodings, initial
// frame moves, padding).
// NOTE(review): non-contiguous listing — several fixed-byte contributions
// (CIE id, version, RA column), the return statement and the closing brace
// are missing from view. Also note lines 728/729/733 size the encoding
// bytes as getULEB128Size(DW_EH_PE_pcrel) while the emitter writes
// DW_EH_PE_pcrel | DW_EH_PE_sdata4 — same one-byte ULEB size either way,
// but worth keeping literally in sync.
704 unsigned JITDwarfEmitter::GetCommonEHFrameSizeInBytes(const Function* Personality)
707 unsigned PointerSize = TD->getPointerSize();
708 int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
709 PointerSize : -PointerSize;
710 unsigned FinalSize = 0;
711 // EH Common Frame header
712 FinalSize += PointerSize;
715 FinalSize += Personality ? 5 : 3; // "zPLR" or "zR"
716 FinalSize += TargetAsmInfo::getULEB128Size(1);
717 FinalSize += TargetAsmInfo::getSLEB128Size(stackGrowth);
// Augmentation data length byte(s).
721 FinalSize += TargetAsmInfo::getULEB128Size(7);
// Personality pointer.
726 FinalSize += PointerSize;
// LSDA / FDE encoding bytes.
728 FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
729 FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
732 FinalSize += TargetAsmInfo::getULEB128Size(1);
733 FinalSize += TargetAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
// Initial frame state, then alignment padding.
736 std::vector<MachineMove> Moves;
737 RI->getInitialFrameState(Moves);
738 FinalSize += GetFrameMovesSizeInBytes(0, Moves);
739 FinalSize = RoundUpToAlign(FinalSize, 4);
// GetFrameMovesSizeInBytes - Size estimate mirroring EmitFrameMoves: for
// each surviving move, the advance-loc bytes plus the DW_CFA_* opcode and
// operand sizes.
// NOTE(review): the return-type line, opcode-byte size additions, return
// statement and closing braces are missing from this non-contiguous
// listing. The advance-row test here compares pointers (BaseLabelPtr !=
// LabelPtr) where the emitter compares ids (BaseLabelID != LabelID) —
// verify the two routines stay in sync.
744 JITDwarfEmitter::GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
745 const std::vector<MachineMove> &Moves) const {
746 unsigned PointerSize = TD->getPointerSize();
747 int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
748 PointerSize : -PointerSize;
749 bool IsLocal = BaseLabelPtr;
750 unsigned FinalSize = 0;
752 for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
753 const MachineMove &Move = Moves[i];
754 unsigned LabelID = Move.getLabelID();
// Map to the final label id; 0 means the label was deleted.
757 LabelID = MMI->MappedLabel(LabelID);
759 // Throw out move if the label is invalid.
760 if (!LabelID) continue;
763 intptr_t LabelPtr = 0;
764 if (LabelID) LabelPtr = MCE->getLabelAddress(LabelID);
766 const MachineLocation &Dst = Move.getDestination();
767 const MachineLocation &Src = Move.getSource();
769 // Advance row if new location.
770 if (BaseLabelPtr && LabelID && (BaseLabelPtr != LabelPtr || !IsLocal)) {
772 FinalSize += PointerSize;
773 BaseLabelPtr = LabelPtr;
// Per-move operand sizes, mirroring EmitFrameMoves' opcode selection.
778 if (Dst.isRegister() && Dst.getRegister() == MachineLocation::VirtualFP) {
779 if (!Src.isRegister()) {
780 if (Src.getRegister() == MachineLocation::VirtualFP) {
784 unsigned RegNum = RI->getDwarfRegNum(Src.getRegister(), true);
785 FinalSize += TargetAsmInfo::getULEB128Size(RegNum);
788 int Offset = -Src.getOffset();
790 FinalSize += TargetAsmInfo::getULEB128Size(Offset);
792 assert(0 && "Machine move no supported yet.");
794 } else if (Src.isRegister() &&
795 Src.getRegister() == MachineLocation::VirtualFP) {
796 if (Dst.isRegister()) {
798 unsigned RegNum = RI->getDwarfRegNum(Dst.getRegister(), true);
799 FinalSize += TargetAsmInfo::getULEB128Size(RegNum);
801 assert(0 && "Machine move no supported yet.");
804 unsigned Reg = RI->getDwarfRegNum(Src.getRegister(), true);
805 int Offset = Dst.getOffset() / stackGrowth;
809 FinalSize += TargetAsmInfo::getULEB128Size(Reg);
810 FinalSize += TargetAsmInfo::getSLEB128Size(Offset);
811 } else if (Reg < 64) {
813 FinalSize += TargetAsmInfo::getULEB128Size(Offset);
816 FinalSize += TargetAsmInfo::getULEB128Size(Reg);
817 FinalSize += TargetAsmInfo::getULEB128Size(Offset);
825 JITDwarfEmitter::GetExceptionTableSizeInBytes(MachineFunction* MF) const {
826 unsigned FinalSize = 0;
828 // Map all labels and get rid of any dead landing pads.
829 MMI->TidyLandingPads();
831 const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
832 const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
833 const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
834 if (PadInfos.empty()) return 0;
836 // Sort the landing pads in order of their type ids. This is used to fold
837 // duplicate actions.
838 SmallVector<const LandingPadInfo *, 64> LandingPads;
839 LandingPads.reserve(PadInfos.size());
840 for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
841 LandingPads.push_back(&PadInfos[i]);
842 std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
844 // Negative type ids index into FilterIds, positive type ids index into
845 // TypeInfos. The value written for a positive type id is just the type
846 // id itself. For a negative type id, however, the value written is the
847 // (negative) byte offset of the corresponding FilterIds entry. The byte
848 // offset is usually equal to the type id, because the FilterIds entries
849 // are written using a variable width encoding which outputs one byte per
850 // entry as long as the value written is not too large, but can differ.
851 // This kind of complication does not occur for positive type ids because
852 // type infos are output using a fixed width encoding.
853 // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
854 SmallVector<int, 16> FilterOffsets;
855 FilterOffsets.reserve(FilterIds.size());
857 for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
858 E = FilterIds.end(); I != E; ++I) {
859 FilterOffsets.push_back(Offset);
860 Offset -= TargetAsmInfo::getULEB128Size(*I);
863 // Compute the actions table and gather the first action index for each
865 SmallVector<ActionEntry, 32> Actions;
866 SmallVector<unsigned, 64> FirstActions;
867 FirstActions.reserve(LandingPads.size());
870 unsigned SizeActions = 0;
871 for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
872 const LandingPadInfo *LP = LandingPads[i];
873 const std::vector<int> &TypeIds = LP->TypeIds;
874 const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
875 unsigned SizeSiteActions = 0;
877 if (NumShared < TypeIds.size()) {
878 unsigned SizeAction = 0;
879 ActionEntry *PrevAction = 0;
882 const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
883 assert(Actions.size());
884 PrevAction = &Actions.back();
885 SizeAction = TargetAsmInfo::getSLEB128Size(PrevAction->NextAction) +
886 TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
887 for (unsigned j = NumShared; j != SizePrevIds; ++j) {
888 SizeAction -= TargetAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
889 SizeAction += -PrevAction->NextAction;
890 PrevAction = PrevAction->Previous;
894 // Compute the actions.
895 for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
896 int TypeID = TypeIds[I];
897 assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
898 int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
899 unsigned SizeTypeID = TargetAsmInfo::getSLEB128Size(ValueForTypeID);
901 int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
902 SizeAction = SizeTypeID + TargetAsmInfo::getSLEB128Size(NextAction);
903 SizeSiteActions += SizeAction;
905 ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
906 Actions.push_back(Action);
908 PrevAction = &Actions.back();
911 // Record the first action of the landing pad site.
912 FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
913 } // else identical - re-use previous FirstAction
915 FirstActions.push_back(FirstAction);
917 // Compute this sites contribution to size.
918 SizeActions += SizeSiteActions;
921 // Compute the call-site table. Entries must be ordered by address.
922 SmallVector<CallSiteEntry, 64> CallSites;
925 for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
926 const LandingPadInfo *LandingPad = LandingPads[i];
927 for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
928 unsigned BeginLabel = LandingPad->BeginLabels[j];
929 assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
930 PadRange P = { i, j };
931 PadMap[BeginLabel] = P;
935 bool MayThrow = false;
936 unsigned LastLabel = 0;
937 for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
939 for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
941 if (!MI->isLabel()) {
942 MayThrow |= MI->getDesc().isCall();
946 unsigned BeginLabel = MI->getOperand(0).getImm();
947 assert(BeginLabel && "Invalid label!");
949 if (BeginLabel == LastLabel)
952 RangeMapType::iterator L = PadMap.find(BeginLabel);
954 if (L == PadMap.end())
957 PadRange P = L->second;
958 const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
960 assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
961 "Inconsistent landing pad map!");
963 // If some instruction between the previous try-range and this one may
964 // throw, create a call-site entry with no landing pad for the region
965 // between the try-ranges.
967 CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
968 CallSites.push_back(Site);
971 LastLabel = LandingPad->EndLabels[P.RangeIndex];
972 CallSiteEntry Site = {BeginLabel, LastLabel,
973 LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
975 assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
976 "Invalid landing pad!");
978 // Try to merge with the previous call-site.
979 if (CallSites.size()) {
980 CallSiteEntry &Prev = CallSites.back();
981 if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
982 // Extend the range of the previous entry.
983 Prev.EndLabel = Site.EndLabel;
988 // Otherwise, create a new call-site.
989 CallSites.push_back(Site);
992 // If some instruction between the previous try-range and the end of the
993 // function may throw, create a call-site entry with no landing pad for the
994 // region following the try-range.
996 CallSiteEntry Site = {LastLabel, 0, 0, 0};
997 CallSites.push_back(Site);
1001 unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
1002 sizeof(int32_t) + // Site length.
1003 sizeof(int32_t)); // Landing pad.
1004 for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
1005 SizeSites += TargetAsmInfo::getULEB128Size(CallSites[i].Action);
1007 unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
1009 unsigned TypeOffset = sizeof(int8_t) + // Call site format
1010 // Call-site table length
1011 TargetAsmInfo::getULEB128Size(SizeSites) +
1012 SizeSites + SizeActions + SizeTypes;
1014 unsigned TotalSize = sizeof(int8_t) + // LPStart format
1015 sizeof(int8_t) + // TType format
1016 TargetAsmInfo::getULEB128Size(TypeOffset) + // TType base offset
1019 unsigned SizeAlign = (4 - TotalSize) & 3;
1021 // Begin the exception table.
1022 FinalSize = RoundUpToAlign(FinalSize, 4);
1023 for (unsigned i = 0; i != SizeAlign; ++i) {
1027 unsigned PointerSize = TD->getPointerSize();
1031 // Asm->EOL("LPStart format (DW_EH_PE_omit)");
1033 // Asm->EOL("TType format (DW_EH_PE_absptr)");
1035 // Asm->EOL("TType base offset");
1037 // Asm->EOL("Call site format (DW_EH_PE_udata4)");
1039 // Asm->EOL("Call-site table length");
1041 // Emit the landing pad site information.
1042 for (unsigned i = 0; i < CallSites.size(); ++i) {
1043 CallSiteEntry &S = CallSites[i];
1045 // Asm->EOL("Region start");
1046 FinalSize += PointerSize;
1048 //Asm->EOL("Region length");
1049 FinalSize += PointerSize;
1051 // Asm->EOL("Landing pad");
1052 FinalSize += PointerSize;
1054 FinalSize += TargetAsmInfo::getULEB128Size(S.Action);
1055 // Asm->EOL("Action");
1058 // Emit the actions.
1059 for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
1060 ActionEntry &Action = Actions[I];
1062 //Asm->EOL("TypeInfo index");
1063 FinalSize += TargetAsmInfo::getSLEB128Size(Action.ValueForTypeID);
1064 //Asm->EOL("Next action");
1065 FinalSize += TargetAsmInfo::getSLEB128Size(Action.NextAction);
1068 // Emit the type ids.
1069 for (unsigned M = TypeInfos.size(); M; --M) {
1070 // Asm->EOL("TypeInfo");
1071 FinalSize += PointerSize;
1074 // Emit the filter typeids.
1075 for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
1076 unsigned TypeID = FilterIds[j];
1077 FinalSize += TargetAsmInfo::getULEB128Size(TypeID);
1078 //Asm->EOL("Filter TypeInfo index");
1081 FinalSize = RoundUpToAlign(FinalSize, 4);