1 //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "assembler"
11 #include "llvm/MC/MCAssembler.h"
12 #include "llvm/MC/MCAsmLayout.h"
13 #include "llvm/MC/MCCodeEmitter.h"
14 #include "llvm/MC/MCExpr.h"
15 #include "llvm/MC/MCObjectWriter.h"
16 #include "llvm/MC/MCSymbol.h"
17 #include "llvm/MC/MCValue.h"
18 #include "llvm/ADT/OwningPtr.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/Twine.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/ErrorHandling.h"
24 #include "llvm/Support/raw_ostream.h"
25 #include "llvm/Target/TargetRegistry.h"
26 #include "llvm/Target/TargetAsmBackend.h"
33 STATISTIC(EmittedFragments, "Number of emitted assembler fragments");
34 STATISTIC(EvaluateFixup, "Number of evaluated fixups");
35 STATISTIC(FragmentLayouts, "Number of fragment layouts");
36 STATISTIC(ObjectBytes, "Number of emitted object file bytes");
37 STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
38 STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
39 STATISTIC(SectionLayouts, "Number of section layouts");
43 // FIXME FIXME FIXME: There are number of places in this file where we convert
44 // what is a 64-bit assembler value used for computation into a value in the
45 // object file, which may truncate it. We should detect that truncation where
46 // invalid and report errors back.
50 MCAsmLayout::MCAsmLayout(MCAssembler &Asm)
51 : Assembler(Asm), LastValidFragment(0)
53 // Compute the section layout order. Virtual sections must go last.
54 for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
55 if (!Asm.getBackend().isVirtualSection(it->getSection()))
56 SectionOrder.push_back(&*it);
57 for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
58 if (Asm.getBackend().isVirtualSection(it->getSection()))
59 SectionOrder.push_back(&*it);
62 bool MCAsmLayout::isSectionUpToDate(const MCSectionData *SD) const {
63 // The first section is always up-to-date.
64 unsigned Index = SD->getLayoutOrder();
68 // Otherwise, sections are always implicitly computed when the preceeding
69 // fragment is layed out.
70 const MCSectionData *Prev = getSectionOrder()[Index - 1];
71 return isFragmentUpToDate(&(Prev->getFragmentList().back()));
74 bool MCAsmLayout::isFragmentUpToDate(const MCFragment *F) const {
75 return (LastValidFragment &&
76 F->getLayoutOrder() <= LastValidFragment->getLayoutOrder());
79 void MCAsmLayout::UpdateForSlide(MCFragment *F, int SlideAmount) {
80 // We shouldn't have to do anything special to support negative slides, and it
81 // is a perfectly valid thing to do as long as other parts of the system can
82 // guarantee convergence.
83 assert(SlideAmount >= 0 && "Negative slides not yet supported");
85 // Update the layout by simply recomputing the layout for the entire
86 // file. This is trivially correct, but very slow.
88 // FIXME-PERF: This is O(N^2), but will be eliminated once we get smarter.
90 // Layout the sections in order.
94 void MCAsmLayout::FragmentReplaced(MCFragment *Src, MCFragment *Dst) {
95 if (LastValidFragment == Src)
96 LastValidFragment = Dst;
98 Dst->Offset = Src->Offset;
99 Dst->EffectiveSize = Src->EffectiveSize;
102 uint64_t MCAsmLayout::getFragmentAddress(const MCFragment *F) const {
103 assert(F->getParent() && "Missing section()!");
104 return getSectionAddress(F->getParent()) + getFragmentOffset(F);
107 uint64_t MCAsmLayout::getFragmentEffectiveSize(const MCFragment *F) const {
108 assert(F->EffectiveSize != ~UINT64_C(0) && "Address not set!");
109 return F->EffectiveSize;
112 uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
113 assert(F->Offset != ~UINT64_C(0) && "Address not set!");
117 uint64_t MCAsmLayout::getSymbolAddress(const MCSymbolData *SD) const {
118 assert(SD->getFragment() && "Invalid getAddress() on undefined symbol!");
119 return getFragmentAddress(SD->getFragment()) + SD->getOffset();
122 uint64_t MCAsmLayout::getSectionAddress(const MCSectionData *SD) const {
123 assert(SD->Address != ~UINT64_C(0) && "Address not set!");
127 uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
128 // The size is the last fragment's end offset.
129 const MCFragment &F = SD->getFragmentList().back();
130 return getFragmentOffset(&F) + getFragmentEffectiveSize(&F);
133 uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
134 // Virtual sections have no file size.
135 if (getAssembler().getBackend().isVirtualSection(SD->getSection()))
138 // Otherwise, the file size is the same as the address space size.
139 return getSectionAddressSize(SD);
142 uint64_t MCAsmLayout::getSectionSize(const MCSectionData *SD) const {
143 // The logical size is the address space size minus any tail padding.
144 uint64_t Size = getSectionAddressSize(SD);
145 const MCAlignFragment *AF =
146 dyn_cast<MCAlignFragment>(&(SD->getFragmentList().back()));
147 if (AF && AF->hasOnlyAlignAddress())
148 Size -= getFragmentEffectiveSize(AF);
155 MCFragment::MCFragment() : Kind(FragmentType(~0)) {
// Construct a fragment of kind \arg _Kind and append it to \arg _Parent's
// fragment list. EffectiveSize starts as the ~0 "unset" sentinel checked by
// the layout accessors above.
//
// NOTE(review): this copy appears to be missing short lines here — at least
// the body's opening '{', likely a null-guard before the push_back (given
// the indentation of the call), and the closing '}'. Preserved as-is;
// reconstruct from upstream history before compiling.
MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent)
  : Kind(_Kind), Parent(_Parent), Atom(0), EffectiveSize(~UINT64_C(0))
    Parent->getFragmentList().push_back(this);
165 MCFragment::~MCFragment() {
170 MCSectionData::MCSectionData() : Section(0) {}
// Construct section data bound to \arg _Section and register it with
// assembler \arg A. Address starts as the ~0 "unset" sentinel checked by
// getSectionAddress().
//
// NOTE(review): this copy appears to be missing short lines here — at least
// one member initializer between Section and Address (the original file's
// numbering skips a line), the body's opening '{', likely a null-guard on A,
// and the closing '}'. Preserved as-is; reconstruct from upstream history
// before compiling.
MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A)
  : Section(&_Section),
    Address(~UINT64_C(0)),
    HasInstructions(false)
    A->getSectionList().push_back(this);
184 MCSymbolData::MCSymbolData() : Symbol(0) {}
// Construct symbol data for \arg _Symbol at \arg _Offset inside
// \arg _Fragment, and register it with assembler \arg A. All flag and
// common-symbol fields start cleared.
//
// NOTE(review): this copy appears to be missing short lines here — the
// body's opening '{', likely a null-guard on A, and the closing '}'.
// Preserved as-is; reconstruct from upstream history before compiling.
MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment,
                           uint64_t _Offset, MCAssembler *A)
  : Symbol(&_Symbol), Fragment(_Fragment), Offset(_Offset),
    IsExternal(false), IsPrivateExtern(false),
    CommonSize(0), CommonAlign(0), Flags(0), Index(0)
    A->getSymbolList().push_back(this);
198 MCAssembler::MCAssembler(MCContext &_Context, TargetAsmBackend &_Backend,
199 MCCodeEmitter &_Emitter, raw_ostream &_OS)
200 : Context(_Context), Backend(_Backend), Emitter(_Emitter),
201 OS(_OS), RelaxAll(false), SubsectionsViaSymbols(false)
205 MCAssembler::~MCAssembler() {
208 static bool isScatteredFixupFullyResolvedSimple(const MCAssembler &Asm,
209 const MCAsmFixup &Fixup,
210 const MCValue Target,
211 const MCSection *BaseSection) {
212 // The effective fixup address is
213 // addr(atom(A)) + offset(A)
214 // - addr(atom(B)) - offset(B)
215 // - addr(<base symbol>) + <fixup offset from base symbol>
216 // and the offsets are not relocatable, so the fixup is fully resolved when
217 // addr(atom(A)) - addr(atom(B)) - addr(<base symbol>)) == 0.
219 // The simple (Darwin, except on x86_64) way of dealing with this was to
220 // assume that any reference to a temporary symbol *must* be a temporary
221 // symbol in the same atom, unless the sections differ. Therefore, any PCrel
222 // relocation to a temporary symbol (in the same section) is fully
223 // resolved. This also works in conjunction with absolutized .set, which
224 // requires the compiler to use .set to absolutize the differences between
225 // symbols which the compiler knows to be assembly time constants, so we don't
226 // need to worry about considering symbol differences fully resolved.
228 // Non-relative fixups are only resolved if constant.
230 return Target.isAbsolute();
232 // Otherwise, relative fixups are only resolved if not a difference and the
233 // target is a temporary in the same section.
234 if (Target.isAbsolute() || Target.getSymB())
237 const MCSymbol *A = &Target.getSymA()->getSymbol();
238 if (!A->isTemporary() || !A->isInSection() ||
239 &A->getSection() != BaseSection)
245 static bool isScatteredFixupFullyResolved(const MCAssembler &Asm,
246 const MCAsmLayout &Layout,
247 const MCAsmFixup &Fixup,
248 const MCValue Target,
249 const MCSymbolData *BaseSymbol) {
250 // The effective fixup address is
251 // addr(atom(A)) + offset(A)
252 // - addr(atom(B)) - offset(B)
253 // - addr(BaseSymbol) + <fixup offset from base symbol>
254 // and the offsets are not relocatable, so the fixup is fully resolved when
255 // addr(atom(A)) - addr(atom(B)) - addr(BaseSymbol) == 0.
257 // Note that "false" is almost always conservatively correct (it means we emit
258 // a relocation which is unnecessary), except when it would force us to emit a
259 // relocation which the target cannot encode.
261 const MCSymbolData *A_Base = 0, *B_Base = 0;
262 if (const MCSymbolRefExpr *A = Target.getSymA()) {
263 // Modified symbol references cannot be resolved.
264 if (A->getKind() != MCSymbolRefExpr::VK_None)
267 A_Base = Asm.getAtom(Layout, &Asm.getSymbolData(A->getSymbol()));
272 if (const MCSymbolRefExpr *B = Target.getSymB()) {
273 // Modified symbol references cannot be resolved.
274 if (B->getKind() != MCSymbolRefExpr::VK_None)
277 B_Base = Asm.getAtom(Layout, &Asm.getSymbolData(B->getSymbol()));
282 // If there is no base, A and B have to be the same atom for this fixup to be
285 return A_Base == B_Base;
287 // Otherwise, B must be missing and A must be the base.
288 return !B_Base && BaseSymbol == A_Base;
291 bool MCAssembler::isSymbolLinkerVisible(const MCSymbolData *SD) const {
292 // Non-temporary labels should always be visible to the linker.
293 if (!SD->getSymbol().isTemporary())
296 // Absolute temporary labels are never visible.
297 if (!SD->getFragment())
300 // Otherwise, check if the section requires symbols even for temporary labels.
301 return getBackend().doesSectionRequireSymbols(
302 SD->getFragment()->getParent()->getSection());
305 const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout,
306 const MCSymbolData *SD) const {
307 // Linker visible symbols define atoms.
308 if (isSymbolLinkerVisible(SD))
311 // Absolute and undefined symbols have no defining atom.
312 if (!SD->getFragment())
315 // Non-linker visible symbols in sections which can't be atomized have no
317 if (!getBackend().isSectionAtomizable(
318 SD->getFragment()->getParent()->getSection()))
321 // Otherwise, return the atom for the containing fragment.
322 return SD->getFragment()->getAtom();
// Evaluate \arg Fixup (located in data fragment \arg DF) against the current
// layout, producing the relocatable \arg Target and resolved \arg Value.
// Returns whether the fixup is fully resolved (no relocation needed).
//
// NOTE(review): this copy of the file has dropped many short lines inside
// this function; the surviving code below will not compile as-is. The known
// gaps are marked inline. Reconstruct from upstream history before building.
bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout,
                                const MCAsmFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value) const {
  ++stats::EvaluateFixup;

  if (!Fixup.Value->EvaluateAsRelocatable(Target, &Layout))
    report_fatal_error("expected relocatable expression");

  // FIXME: How do non-scattered symbols work in ELF? I presume the linker
  // doesn't support small relocations, but then under what criteria does the
  // assembler allow symbol differences?

  Value = Target.getConstant();

  // NOTE(review): this line is a dangling continuation — the preceding line
  // (presumably `bool IsPCRel =`) was dropped; IsPCRel is used further down.
  Emitter.getFixupKindInfo(Fixup.Kind).Flags & MCFixupKindInfo::FKF_IsPCRel;
  bool IsResolved = true;
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->getSymbol().isDefined())
      Value += Layout.getSymbolAddress(&getSymbolData(A->getSymbol()));
  // NOTE(review): the else-branch (presumably clearing IsResolved) and the
  // closing '}' of the A block appear to have been dropped here.
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    if (B->getSymbol().isDefined())
      Value -= Layout.getSymbolAddress(&getSymbolData(B->getSymbol()));
  // NOTE(review): the else-branch and closing '}' of the B block appear to
  // have been dropped here as well.

  // If we are using scattered symbols, determine whether this value is actually
  // resolved; scattering may cause atoms to move.
  if (IsResolved && getBackend().hasScatteredSymbols()) {
    if (getBackend().hasReliableSymbolDifference()) {
      // If this is a PCrel relocation, find the base atom (identified by its
      // symbol) that the fixup value is relative to.
      const MCSymbolData *BaseSymbol = 0;
      // NOTE(review): an `if (IsPCRel)` guard and subsequent null-check
      // appear to have been dropped around the next statement.
      BaseSymbol = DF->getAtom();

      // NOTE(review): the continuation of this call (the BaseSymbol argument
      // and closing paren), and the `} else {` separating the two strategies,
      // appear to have been dropped.
      IsResolved = isScatteredFixupFullyResolved(*this, Layout, Fixup, Target,
      const MCSection *BaseSection = 0;
      // NOTE(review): an `if (IsPCRel)` guard appears to have been dropped.
      BaseSection = &DF->getParent()->getSection();

      IsResolved = isScatteredFixupFullyResolvedSimple(*this, Fixup, Target,
  // NOTE(review): the call continuation, closing braces, an `if (IsPCRel)`
  // guard for the PC adjustment below, and the final `return IsResolved;`
  // appear to have been dropped.
  Value -= Layout.getFragmentAddress(DF) + Fixup.Offset;
// Compute the effective (laid-out) size of fragment F, given its section's
// address and the fragment's offset within that section. Data/fill/inst
// fragments have intrinsic sizes; align fragments pad to the requested
// boundary (capped by MaxBytesToEmit); org fragments pad to an absolute
// target offset.
//
// NOTE(review): this copy of the file has dropped several short lines in
// this function — including the `const MCFragment &F,` parameter line (F is
// used throughout the body), call continuations, and return statements —
// marked inline below. Reconstruct from upstream history before building.
uint64_t MCAssembler::ComputeFragmentSize(MCAsmLayout &Layout,
                                          uint64_t SectionAddress,
                                          uint64_t FragmentOffset) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Fill:
    return cast<MCFillFragment>(F).getSize();
  case MCFragment::FT_Inst:
    return cast<MCInstFragment>(F).getInstSize();

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);

    assert((!AF.hasOnlyAlignAddress() || !AF.getNextNode()) &&
           "Invalid OnlyAlignAddress bit, not the last fragment!");

    // NOTE(review): the continuation of this call (presumably
    // `AF.getAlignment());`) was dropped.
    uint64_t Size = OffsetToAlignment(SectionAddress + FragmentOffset,

    // Honor MaxBytesToEmit.
    // NOTE(review): the body of this if, the `return Size;` and the closing
    // '}' of the case block were dropped.
    if (Size > AF.getMaxBytesToEmit())

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    // FIXME: We should compute this sooner, we don't want to recurse here, and
    // we would like to be more functional.
    int64_t TargetLocation;
    if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, &Layout))
      report_fatal_error("expected assembly-time absolute expression");

    // FIXME: We need a way to communicate this error.
    int64_t Offset = TargetLocation - FragmentOffset;
    // NOTE(review): the guard (presumably `if (Offset < 0)`) before this
    // error call was dropped.
    report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
                       "' (at offset '" + Twine(FragmentOffset) + "'");

  // NOTE(review): the `return Offset;`, case/switch closing braces, and the
  // function's trailing return and '}' were dropped.
  assert(0 && "invalid fragment kind");
438 void MCAsmLayout::LayoutFile() {
439 // Initialize the first section and set the valid fragment layout point.
440 LastValidFragment = 0;
441 if (!getSectionOrder().empty())
442 getSectionOrder().front()->Address = 0;
444 for (unsigned i = 0, e = getSectionOrder().size(); i != e; ++i) {
445 MCSectionData *SD = getSectionOrder()[i];
447 for (MCSectionData::iterator it = SD->begin(),
448 ie = SD->end(); it != ie; ++it)
453 void MCAsmLayout::LayoutFragment(MCFragment *F) {
454 MCFragment *Prev = F->getPrevNode();
456 // We should never try to recompute something which is up-to-date.
457 assert(!isFragmentUpToDate(F) && "Attempt to recompute up-to-date fragment!");
458 // We should never try to compute the fragment layout if the section isn't
460 assert(isSectionUpToDate(F->getParent()) &&
461 "Attempt to compute fragment before it's section!");
462 // We should never try to compute the fragment layout if it's predecessor
464 assert((!Prev || isFragmentUpToDate(Prev)) &&
465 "Attempt to compute fragment before it's predecessor!");
467 ++stats::FragmentLayouts;
469 // Compute the fragment start address.
470 uint64_t StartAddress = F->getParent()->Address;
471 uint64_t Address = StartAddress;
473 Address += Prev->Offset + Prev->EffectiveSize;
475 // Compute fragment offset and size.
476 F->Offset = Address - StartAddress;
477 F->EffectiveSize = getAssembler().ComputeFragmentSize(*this, *F, StartAddress,
479 LastValidFragment = F;
481 // If this is the last fragment in a section, update the next section address.
482 if (!F->getNextNode()) {
483 unsigned NextIndex = F->getParent()->getLayoutOrder() + 1;
484 if (NextIndex != getSectionOrder().size())
485 LayoutSection(getSectionOrder()[NextIndex]);
489 void MCAsmLayout::LayoutSection(MCSectionData *SD) {
490 unsigned SectionOrderIndex = SD->getLayoutOrder();
492 ++stats::SectionLayouts;
494 // Compute the section start address.
495 uint64_t StartAddress = 0;
496 if (SectionOrderIndex) {
497 MCSectionData *Prev = getSectionOrder()[SectionOrderIndex - 1];
498 StartAddress = getSectionAddress(Prev) + getSectionAddressSize(Prev);
501 // Honor the section alignment requirements.
502 StartAddress = RoundUpToAlignment(StartAddress, SD->getAlignment());
504 // Set the section address.
505 SD->Address = StartAddress;
/// WriteFragmentData - Write the \arg F data to the output file.
//
// NOTE(review): this copy of the file has dropped many short lines inside
// this function (switch `default:` labels, `break;` statements, closing
// braces, and some output statements) — the known gaps are marked inline.
// Reconstruct from upstream history before building.
static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
                              const MCFragment &F, MCObjectWriter *OW) {
  uint64_t Start = OW->getStream().tell();

  ++stats::EmittedFragments;

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Layout.getFragmentEffectiveSize(&F);
  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    MCAlignFragment &AF = cast<MCAlignFragment>(F);
    uint64_t Count = FragmentSize / AF.getValueSize();

    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to fill
    // the Count bytes. Then if that did not fill any bytes or there are any
    // bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().WriteNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
    // NOTE(review): a `break;` and the closing '}' of this if appear to have
    // been dropped here.

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      // NOTE(review): a `default:` label appears to have been dropped before
      // this assert.
        assert(0 && "Invalid size!");
      case 1: OW->Write8 (uint8_t (AF.getValue())); break;
      case 2: OW->Write16(uint16_t(AF.getValue())); break;
      case 4: OW->Write32(uint32_t(AF.getValue())); break;
      case 8: OW->Write64(uint64_t(AF.getValue())); break;
  // NOTE(review): the closing braces of the inner switch/loop/case and a
  // `break;` appear to have been dropped here.

  case MCFragment::FT_Data: {
    MCDataFragment &DF = cast<MCDataFragment>(F);
    assert(FragmentSize == DF.getContents().size() && "Invalid size!");
    OW->WriteBytes(DF.getContents().str());
  // NOTE(review): a `break;` and closing '}' appear to have been dropped.

  case MCFragment::FT_Fill: {
    MCFillFragment &FF = cast<MCFillFragment>(F);

    assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!");

    for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
      switch (FF.getValueSize()) {
      // NOTE(review): a `default:` label appears to have been dropped before
      // this assert.
        assert(0 && "Invalid size!");
      case 1: OW->Write8 (uint8_t (FF.getValue())); break;
      case 2: OW->Write16(uint16_t(FF.getValue())); break;
      case 4: OW->Write32(uint32_t(FF.getValue())); break;
      case 8: OW->Write64(uint64_t(FF.getValue())); break;
  // NOTE(review): the closing braces and a `break;` appear to have been
  // dropped here.

  case MCFragment::FT_Inst:
    llvm_unreachable("unexpected inst fragment after lowering");
    // NOTE(review): a `break;` may have been dropped here.

  case MCFragment::FT_Org: {
    MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->Write8(uint8_t(OF.getValue()));
  // NOTE(review): a `break;`, the case's closing '}', and the switch's
  // closing '}' appear to have been dropped here.

  assert(OW->getStream().tell() - Start == FragmentSize);
  // NOTE(review): the function's closing '}' appears to have been dropped.
// Write the contents of section \arg SD to the object writer \arg OW.
// Virtual sections emit nothing (their fragments are validated in debug
// builds); concrete sections emit every fragment in order and sanity-check
// the byte count against the layout's file size.
//
// NOTE(review): this copy of the file has dropped several short lines here
// (a switch `default:` label, `break;` statements, an early `return;` for
// the virtual-section branch, and closing braces) — marked inline.
// Reconstruct from upstream history before building.
void MCAssembler::WriteSectionData(const MCSectionData *SD,
                                   const MCAsmLayout &Layout,
                                   MCObjectWriter *OW) const {
  // Ignore virtual sections.
  if (getBackend().isVirtualSection(SD->getSection())) {
    assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (MCSectionData::const_iterator it = SD->begin(),
           ie = SD->end(); it != ie; ++it) {
      switch (it->getKind()) {
      // NOTE(review): a `default:` label appears to have been dropped before
      // this assert.
        assert(0 && "Invalid fragment in virtual section!");
      case MCFragment::FT_Align:
        assert(!cast<MCAlignFragment>(it)->getValueSize() &&
               "Invalid align in virtual section!");
        // NOTE(review): a `break;` appears to have been dropped here.
      case MCFragment::FT_Fill:
        assert(!cast<MCFillFragment>(it)->getValueSize() &&
               "Invalid fill in virtual section!");
  // NOTE(review): a `break;`, the switch/loop closing braces, the early
  // `return;` for the virtual-section branch, and the if's closing '}'
  // appear to have been dropped here — without the return, concrete-section
  // emission below would also run for virtual sections.

  uint64_t Start = OW->getStream().tell();

  for (MCSectionData::const_iterator it = SD->begin(),
         ie = SD->end(); it != ie; ++it)
    WriteFragmentData(*this, Layout, *it, OW);

  assert(OW->getStream().tell() - Start == Layout.getSectionFileSize(SD));
  // NOTE(review): the function's closing '}' appears to have been dropped.
// Drive final assembly: insert gas-compatible inter-section align fragments,
// assign section/fragment ordinals and layout order, relax until the layout
// converges, lower instruction fragments, then evaluate fixups (emitting
// relocations for unresolved ones) and write the object file.
//
// NOTE(review): this copy of the file has dropped many short lines inside
// this function (DEBUG block closers, `continue;` statements, declarations,
// guards, and closing braces) — the known gaps are marked inline.
// Reconstruct from upstream history before building.
void MCAssembler::Finish() {
  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - pre-layout\n--\n";
  // NOTE(review): the rest of this DEBUG block (presumably a dump() call and
  // the closing `});`) appears to have been dropped.

  // Create the layout object.
  MCAsmLayout Layout(*this);

  // Insert additional align fragments for concrete sections to explicitly pad
  // the previous section to match their alignment requirements. This is for
  // 'gas' compatibility, it shouldn't strictly be necessary.
  //
  // FIXME: This may be Mach-O specific.
  for (unsigned i = 1, e = Layout.getSectionOrder().size(); i < e; ++i) {
    MCSectionData *SD = Layout.getSectionOrder()[i];

    // Ignore sections without alignment requirements.
    unsigned Align = SD->getAlignment();
    // NOTE(review): a guard (presumably `if (Align == 1) continue;`) appears
    // to have been dropped here.

    // Ignore virtual sections, they don't cause file size modifications.
    if (getBackend().isVirtualSection(SD->getSection()))
      // NOTE(review): the `continue;` body of this if appears dropped.

    // Otherwise, create a new align fragment at the end of the previous
    // section.
    MCAlignFragment *AF = new MCAlignFragment(Align, 0, 1, Align,
                                              Layout.getSectionOrder()[i - 1]);
    AF->setOnlyAlignAddress(true);
  // NOTE(review): the loop's closing '}' appears to have been dropped.

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
    // Create dummy fragments to eliminate any empty sections, this simplifies
    // layout.
    if (it->getFragmentList().empty()) {
      unsigned ValueSize = 1;
      if (getBackend().isVirtualSection(it->getSection()))
        // NOTE(review): the if's body (presumably `ValueSize = 0;`) appears
        // dropped, leaving ValueSize unused by the call below.
      new MCFillFragment(0, 1, 0, it);
    // NOTE(review): the if's closing '}' appears to have been dropped.

    it->setOrdinal(SectionIndex++);
  // NOTE(review): the loop's closing '}' appears to have been dropped.

  // Assign layout order indices to sections and fragments.
  unsigned FragmentIndex = 0;
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSectionData *SD = Layout.getSectionOrder()[i];
    SD->setLayoutOrder(i);

    for (MCSectionData::iterator it2 = SD->begin(),
           ie2 = SD->end(); it2 != ie2; ++it2)
      it2->setLayoutOrder(FragmentIndex++);
  // NOTE(review): the loop's closing '}' appears to have been dropped.

  // Layout until everything fits.
  while (LayoutOnce(Layout))
    // NOTE(review): the loop body (presumably `continue;`) appears dropped.

  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - post-relaxation\n--\n";
  // NOTE(review): the rest of this DEBUG block appears to have been dropped.

  // Finalize the layout, including fragment lowering.
  FinishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - final-layout\n--\n";
  // NOTE(review): the rest of this DEBUG block appears to have been dropped.

  uint64_t StartOffset = OS.tell();
  llvm::OwningPtr<MCObjectWriter> Writer(getBackend().createObjectWriter(OS));
  // NOTE(review): the guard (presumably `if (!Writer)`) before this error
  // call appears to have been dropped.
    report_fatal_error("unable to create object writer!");

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  Writer->ExecutePostLayoutBinding(*this);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
    for (MCSectionData::iterator it2 = it->begin(),
           ie2 = it->end(); it2 != ie2; ++it2) {
      MCDataFragment *DF = dyn_cast<MCDataFragment>(it2);
      // NOTE(review): the null-check (presumably `if (!DF) continue;`)
      // appears to have been dropped.

      for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(),
             ie3 = DF->fixup_end(); it3 != ie3; ++it3) {
        MCAsmFixup &Fixup = *it3;

        // Evaluate the fixup.
        // NOTE(review): the declarations (presumably `MCValue Target;
        // uint64_t FixedValue;`) appear to have been dropped.
        if (!EvaluateFixup(Layout, Fixup, DF, Target, FixedValue)) {
          // The fixup was unresolved, we need a relocation. Inform the object
          // writer of the relocation, and give it an opportunity to adjust the
          // fixup value if need be.
          Writer->RecordRelocation(*this, Layout, DF, Fixup, Target,FixedValue);
        // NOTE(review): the if's closing '}' appears to have been dropped.

        getBackend().ApplyFixup(Fixup, *DF, FixedValue);
  // NOTE(review): three closing braces (fixup loop, fragment loop, section
  // loop) appear to have been dropped here.

  // Write the object file.
  Writer->WriteObject(*this, Layout);

  stats::ObjectBytes += OS.tell() - StartOffset;
  // NOTE(review): the function's closing '}' appears to have been dropped.
754 bool MCAssembler::FixupNeedsRelaxation(const MCAsmFixup &Fixup,
755 const MCFragment *DF,
756 const MCAsmLayout &Layout) const {
760 // If we cannot resolve the fixup value, it requires relaxation.
763 if (!EvaluateFixup(Layout, Fixup, DF, Target, Value))
766 // Otherwise, relax if the value is too big for a (signed) i8.
768 // FIXME: This is target dependent!
769 return int64_t(Value) != int64_t(int8_t(Value));
772 bool MCAssembler::FragmentNeedsRelaxation(const MCInstFragment *IF,
773 const MCAsmLayout &Layout) const {
774 // If this inst doesn't ever need relaxation, ignore it. This occurs when we
775 // are intentionally pushing out inst fragments, or because we relaxed a
776 // previous instruction to one that doesn't need relaxation.
777 if (!getBackend().MayNeedRelaxation(IF->getInst(), IF->getFixups()))
780 for (MCInstFragment::const_fixup_iterator it = IF->fixup_begin(),
781 ie = IF->fixup_end(); it != ie; ++it)
782 if (FixupNeedsRelaxation(*it, IF, Layout))
// Perform one layout-and-relaxation pass: lay out the whole file, then scan
// for instruction fragments that no longer fit, re-encode them in relaxed
// form, and slide the layout. Returns whether anything was relaxed (i.e.
// another pass is needed).
//
// NOTE(review): this copy of the file has dropped several short lines here
// (the initial layout call, `continue;`, the `MCInst Relaxed;` declaration,
// a call continuation, guards, the final return, and closing braces) —
// marked inline. Reconstruct from upstream history before building.
bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  // Layout the sections in order.
  // NOTE(review): the call this comment labels (presumably `LayoutFile();`,
  // as in UpdateForSlide) appears to have been dropped.

  // Scan for fragments that need relaxation.
  bool WasRelaxed = false;
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSectionData &SD = *it;

    for (MCSectionData::iterator it2 = SD.begin(),
           ie2 = SD.end(); it2 != ie2; ++it2) {
      // Check if this is an instruction fragment that needs relaxation.
      MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
      if (!IF || !FragmentNeedsRelaxation(IF, Layout))
        // NOTE(review): the if's body (presumably `continue;`) appears
        // dropped.

      ++stats::RelaxedInstructions;

      // FIXME-PERF: We could immediately lower out instructions if we can tell
      // they are fully resolved, to avoid retesting on later passes.

      // Relax the fragment.
      // NOTE(review): the declaration (presumably `MCInst Relaxed;`) appears
      // to have been dropped before this call.
      getBackend().RelaxInstruction(IF, Relaxed);

      // Encode the new instruction.
      //
      // FIXME-PERF: If it matters, we could let the target do this. It can
      // probably do so more efficiently in many cases.
      SmallVector<MCFixup, 4> Fixups;
      SmallString<256> Code;
      raw_svector_ostream VecOS(Code);
      getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups);
      // NOTE(review): a stream flush (presumably `VecOS.flush();`) appears
      // to have been dropped.

      // Update the instruction fragment.
      int SlideAmount = Code.size() - IF->getInstSize();
      IF->setInst(Relaxed);
      IF->getCode() = Code;
      IF->getFixups().clear();
      for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
        MCFixup &F = Fixups[i];
        IF->getFixups().push_back(MCAsmFixup(F.getOffset(), *F.getValue(),
        // NOTE(review): the continuation of this call (the fixup kind
        // argument and closing parens) and the loop's '}' appear dropped.

      // Update the layout, and remember that we relaxed. If we are relaxing
      // everything, we can skip this step since nothing will depend on updating
      // the fragment offsets.
      // NOTE(review): a guard (referencing the relax-all mode per the
      // comment above) appears to have been dropped before this call.
        Layout.UpdateForSlide(IF, SlideAmount);
  // NOTE(review): `WasRelaxed = true;`, the loops' closing braces, and
  // `return WasRelaxed;` with the function's '}' appear to have been
  // dropped here.
// Finalize the layout by lowering every instruction fragment into an
// equivalent data fragment (copying code bytes and fixups), so later fixup
// application and emission only deal with data fragments.
//
// NOTE(review): this copy of the file has dropped several short lines here
// (a comment continuation, the `if (!IF) continue;` style guard, the
// iterator update after erase, and closing braces) — marked inline.
// Reconstruct from upstream history before building.
void MCAssembler::FinishLayout(MCAsmLayout &Layout) {
  // Lower out any instruction fragments, to simplify the fixup application and
  // output.
  //
  // FIXME-PERF: We don't have to do this, but the assumption is that it is
  // cheap (we will mostly end up eliminating fragments and appending on to data
  // fragments), so the extra complexity downstream isn't worth it. Evaluate
  // this assumption.
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSectionData &SD = *it;

    for (MCSectionData::iterator it2 = SD.begin(),
           ie2 = SD.end(); it2 != ie2; ++it2) {
      MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
      // NOTE(review): the null-check (presumably `if (!IF) continue;`)
      // appears to have been dropped here.

      // Create a new data fragment for the instruction.
      //
      // FIXME-PERF: Reuse previous data fragment if possible.
      MCDataFragment *DF = new MCDataFragment();
      SD.getFragmentList().insert(it2, DF);

      // Update the data fragments layout data.
      DF->setParent(IF->getParent());
      DF->setAtom(IF->getAtom());
      DF->setLayoutOrder(IF->getLayoutOrder());
      Layout.FragmentReplaced(IF, DF);

      // Copy in the data and the fixups.
      DF->getContents().append(IF->getCode().begin(), IF->getCode().end());
      for (unsigned i = 0, e = IF->getFixups().size(); i != e; ++i)
        DF->getFixups().push_back(IF->getFixups()[i]);

      // Delete the instruction fragment and update the iterator.
      SD.getFragmentList().erase(IF);
  // NOTE(review): the iterator update (presumably `it2 = DF;`) and the
  // loops'/function's closing braces appear to have been dropped here.
894 raw_ostream &operator<<(raw_ostream &OS, const MCAsmFixup &AF) {
895 OS << "<MCAsmFixup" << " Offset:" << AF.Offset << " Value:" << *AF.Value
896 << " Kind:" << AF.Kind << ">";
902 void MCFragment::dump() {
903 raw_ostream &OS = llvm::errs();
905 OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder
906 << " Offset:" << Offset << " EffectiveSize:" << EffectiveSize << ">";
909 void MCAlignFragment::dump() {
910 raw_ostream &OS = llvm::errs();
912 OS << "<MCAlignFragment ";
913 this->MCFragment::dump();
915 OS << " (emit nops)";
916 if (hasOnlyAlignAddress())
917 OS << " (only align section)";
919 OS << " Alignment:" << getAlignment()
920 << " Value:" << getValue() << " ValueSize:" << getValueSize()
921 << " MaxBytesToEmit:" << getMaxBytesToEmit() << ">";
// Dump this data fragment (base fragment data, hex contents, and fixups) to
// llvm::errs().
//
// NOTE(review): this copy of the file has dropped several short lines here
// (output statements opening the contents/fixups lists, per-element
// separators, and closing braces) — marked inline. Reconstruct from
// upstream history before building.
void MCDataFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCDataFragment ";
  this->MCFragment::dump();
  // NOTE(review): output statements opening the contents list appear to
  // have been dropped before this loop.
  for (unsigned i = 0, e = getContents().size(); i != e; ++i) {
    // NOTE(review): a per-element separator statement appears dropped.
    OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF);
  // NOTE(review): the loop's closing '}' appears dropped.
  OS << "] (" << getContents().size() << " bytes)";

  if (!getFixups().empty()) {
    // NOTE(review): output statements opening the fixups list appear
    // dropped.
    for (fixup_iterator it = fixup_begin(), ie = fixup_end(); it != ie; ++it) {
      if (it != fixup_begin()) OS << ",\n ";
  // NOTE(review): the per-fixup output (presumably `OS << *it;`), list
  // closer, and the loop/if/function closing braces appear dropped.
950 void MCFillFragment::dump() {
951 raw_ostream &OS = llvm::errs();
953 OS << "<MCFillFragment ";
954 this->MCFragment::dump();
956 OS << " Value:" << getValue() << " ValueSize:" << getValueSize()
957 << " Size:" << getSize() << ">";
// Dump this instruction fragment (base fragment data plus the pretty-printed
// instruction) to llvm::errs().
//
// NOTE(review): this copy appears to be missing short lines here (output
// statements between the base dump and the instruction, the closing '>'
// output, and the function's '}'). Preserved as-is; reconstruct from
// upstream history before building.
void MCInstFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCInstFragment ";
  this->MCFragment::dump();
  getInst().dump_pretty(OS);
971 void MCOrgFragment::dump() {
972 raw_ostream &OS = llvm::errs();
974 OS << "<MCOrgFragment ";
975 this->MCFragment::dump();
977 OS << " Offset:" << getOffset() << " Value:" << getValue() << ">";
// Dump this section's layout data and each of its fragments to llvm::errs().
//
// NOTE(review): this copy appears to be missing short lines here (the
// per-fragment dump call inside the loop, the list-closing output, the
// loop's '}' and the function's '}'). Preserved as-is; reconstruct from
// upstream history before building.
void MCSectionData::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCSectionData";
  OS << " Alignment:" << getAlignment() << " Address:" << Address
     << " Fragments:[\n ";
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin()) OS << ",\n ";
993 void MCSymbolData::dump() {
994 raw_ostream &OS = llvm::errs();
996 OS << "<MCSymbolData Symbol:" << getSymbol()
997 << " Fragment:" << getFragment() << " Offset:" << getOffset()
998 << " Flags:" << getFlags() << " Index:" << getIndex();
1000 OS << " (common, size:" << getCommonSize()
1001 << " align: " << getCommonAlignment() << ")";
1003 OS << " (external)";
1004 if (isPrivateExtern())
1005 OS << " (private extern)";
1009 void MCAssembler::dump() {
1010 raw_ostream &OS = llvm::errs();
1012 OS << "<MCAssembler\n";
1013 OS << " Sections:[\n ";
1014 for (iterator it = begin(), ie = end(); it != ie; ++it) {
1015 if (it != begin()) OS << ",\n ";
1021 for (symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) {
1022 if (it != symbol_begin()) OS << ",\n ";