X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FMC%2FMCAssembler.cpp;h=9992646042243be34d8cad2a86b5e50c46a46382;hb=8fbc00b5bab870d3c756d40add9b0eb27827fb97;hp=6cdef20d2496d371eef5fbd4d618a19bfe89d3d6;hpb=3153fec733acd079a9e681d16d39253b9517e02c;p=oota-llvm.git diff --git a/lib/MC/MCAssembler.cpp b/lib/MC/MCAssembler.cpp index 6cdef20d249..99926460422 100644 --- a/lib/MC/MCAssembler.cpp +++ b/lib/MC/MCAssembler.cpp @@ -11,10 +11,13 @@ #include "llvm/MC/MCAssembler.h" #include "llvm/MC/MCAsmLayout.h" #include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSection.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCValue.h" +#include "llvm/MC/MCDwarf.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" @@ -36,7 +39,6 @@ STATISTIC(FragmentLayouts, "Number of fragment layouts"); STATISTIC(ObjectBytes, "Number of emitted object file bytes"); STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps"); STATISTIC(RelaxedInstructions, "Number of relaxed instructions"); -STATISTIC(SectionLayouts, "Number of section layouts"); } } @@ -47,83 +49,77 @@ STATISTIC(SectionLayouts, "Number of section layouts"); /* *** */ -MCAsmLayout::MCAsmLayout(MCAssembler &Asm) : Assembler(Asm) { +MCAsmLayout::MCAsmLayout(MCAssembler &Asm) + : Assembler(Asm), LastValidFragment() + { // Compute the section layout order. Virtual sections must go last. for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it) - if (!Asm.getBackend().isVirtualSection(it->getSection())) + if (!it->getSection().isVirtualSection()) SectionOrder.push_back(&*it); for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it) - if (Asm.getBackend().isVirtualSection(it->getSection())) + if (it->getSection().isVirtualSection()) SectionOrder.push_back(&*it); } -void MCAsmLayout::UpdateForSlide(MCFragment *F, int SlideAmount) { - // We shouldn't have to do anything special to support negative slides, and it - // is a perfectly valid thing to do as long as other parts of the system can - // guarantee convergence. - assert(SlideAmount >= 0 && "Negative slides not yet supported"); +bool MCAsmLayout::isFragmentUpToDate(const MCFragment *F) const { + const MCSectionData &SD = *F->getParent(); + const MCFragment *LastValid = LastValidFragment.lookup(&SD); + if (!LastValid) + return false; + assert(LastValid->getParent() == F->getParent()); + return F->getLayoutOrder() <= LastValid->getLayoutOrder(); +} - // Update the layout by simply recomputing the layout for the entire - // file. This is trivially correct, but very slow. - // - // FIXME-PERF: This is O(N^2), but will be eliminated once we get smarter. +void MCAsmLayout::Invalidate(MCFragment *F) { + // If this fragment wasn't already up-to-date, we don't need to do anything. + if (!isFragmentUpToDate(F)) + return; - // Layout the sections in order. - for (unsigned i = 0, e = getSectionOrder().size(); i != e; ++i) - getAssembler().LayoutSection(*this, i); + // Otherwise, reset the last valid fragment to this fragment. 
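// ---- Editor's note: illustrative sketch, not part of this patch. ----------
// The new lazy layout keeps, per section, the last fragment whose offset is
// known to be correct; anything ordered after it is stale. A minimal model of
// isFragmentUpToDate()/Invalidate() above, using hypothetical types (the
// names here are assumptions, not LLVM API):

struct FragSketch { unsigned LayoutOrder; };

struct SectionLayoutSketch {
  const FragSketch *LastValid = nullptr;   // most recently laid-out fragment

  bool isUpToDate(const FragSketch *F) const {
    return LastValid && F->LayoutOrder <= LastValid->LayoutOrder;
  }

  // Mirror of MCAsmLayout::Invalidate: F itself stays valid, but every
  // fragment ordered after it will be recomputed lazily by EnsureValid().
  void invalidate(const FragSketch *F) {
    if (isUpToDate(F))
      LastValid = F;
  }
};
// ---------------------------------------------------------------------------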
+ const MCSectionData &SD = *F->getParent(); + LastValidFragment[&SD] = F; } -uint64_t MCAsmLayout::getFragmentAddress(const MCFragment *F) const { - assert(F->getParent() && "Missing section()!"); - return getSectionAddress(F->getParent()) + getFragmentOffset(F); -} +void MCAsmLayout::EnsureValid(const MCFragment *F) const { + MCSectionData &SD = *F->getParent(); -uint64_t MCAsmLayout::getFragmentEffectiveSize(const MCFragment *F) const { - assert(F->EffectiveSize != ~UINT64_C(0) && "Address not set!"); - return F->EffectiveSize; -} + MCFragment *Cur = LastValidFragment[&SD]; + if (!Cur) + Cur = &*SD.begin(); + else + Cur = Cur->getNextNode(); -void MCAsmLayout::setFragmentEffectiveSize(MCFragment *F, uint64_t Value) { - F->EffectiveSize = Value; + // Advance the layout position until the fragment is up-to-date. + while (!isFragmentUpToDate(F)) { + const_cast(this)->LayoutFragment(Cur); + Cur = Cur->getNextNode(); + } } uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const { + EnsureValid(F); assert(F->Offset != ~UINT64_C(0) && "Address not set!"); return F->Offset; } -void MCAsmLayout::setFragmentOffset(MCFragment *F, uint64_t Value) { - F->Offset = Value; -} - -uint64_t MCAsmLayout::getSymbolAddress(const MCSymbolData *SD) const { - assert(SD->getFragment() && "Invalid getAddress() on undefined symbol!"); - return getFragmentAddress(SD->getFragment()) + SD->getOffset(); -} - -uint64_t MCAsmLayout::getSectionAddress(const MCSectionData *SD) const { - assert(SD->Address != ~UINT64_C(0) && "Address not set!"); - return SD->Address; -} - -void MCAsmLayout::setSectionAddress(MCSectionData *SD, uint64_t Value) { - SD->Address = Value; +uint64_t MCAsmLayout::getSymbolOffset(const MCSymbolData *SD) const { + assert(SD->getFragment() && "Invalid getOffset() on undefined symbol!"); + return getFragmentOffset(SD->getFragment()) + SD->getOffset(); } -uint64_t MCAsmLayout::getSectionSize(const MCSectionData *SD) const { - assert(SD->Size != ~UINT64_C(0) && "File size not set!"); - return SD->Size; -} -void MCAsmLayout::setSectionSize(MCSectionData *SD, uint64_t Value) { - SD->Size = Value; +uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const { + // The size is the last fragment's end offset. + const MCFragment &F = SD->getFragmentList().back(); + return getFragmentOffset(&F) + getAssembler().ComputeFragmentSize(*this, F); } uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const { - assert(SD->FileSize != ~UINT64_C(0) && "File size not set!"); - return SD->FileSize; -} -void MCAsmLayout::setSectionFileSize(MCSectionData *SD, uint64_t Value) { - SD->FileSize = Value; + // Virtual sections have no file size. + if (SD->getSection().isVirtualSection()) + return 0; + + // Otherwise, the file size is the same as the address space size. 
+ return getSectionAddressSize(SD); } /* *** */ @@ -131,26 +127,24 @@ void MCAsmLayout::setSectionFileSize(MCSectionData *SD, uint64_t Value) { MCFragment::MCFragment() : Kind(FragmentType(~0)) { } +MCFragment::~MCFragment() { +} + MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent) - : Kind(_Kind), Parent(_Parent), Atom(0), EffectiveSize(~UINT64_C(0)) + : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0)) { if (Parent) Parent->getFragmentList().push_back(this); } -MCFragment::~MCFragment() { -} - /* *** */ MCSectionData::MCSectionData() : Section(0) {} MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A) : Section(&_Section), + Ordinal(~UINT32_C(0)), Alignment(1), - Address(~UINT64_C(0)), - Size(~UINT64_C(0)), - FileSize(~UINT64_C(0)), HasInstructions(false) { if (A) @@ -165,7 +159,8 @@ MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment, uint64_t _Offset, MCAssembler *A) : Symbol(&_Symbol), Fragment(_Fragment), Offset(_Offset), IsExternal(false), IsPrivateExtern(false), - CommonSize(0), CommonAlign(0), Flags(0), Index(0) + CommonSize(0), SymbolSize(0), CommonAlign(0), + Flags(0), Index(0) { if (A) A->getSymbolList().push_back(this); @@ -173,117 +168,33 @@ MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment, /* *** */ -MCAssembler::MCAssembler(MCContext &_Context, TargetAsmBackend &_Backend, - MCCodeEmitter &_Emitter, raw_ostream &_OS) - : Context(_Context), Backend(_Backend), Emitter(_Emitter), - OS(_OS), RelaxAll(false), SubsectionsViaSymbols(false) +MCAssembler::MCAssembler(MCContext &Context_, TargetAsmBackend &Backend_, + MCCodeEmitter &Emitter_, MCObjectWriter &Writer_, + raw_ostream &OS_) + : Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_), + OS(OS_), RelaxAll(false), NoExecStack(false), SubsectionsViaSymbols(false) { } MCAssembler::~MCAssembler() { } -static bool isScatteredFixupFullyResolvedSimple(const MCAssembler &Asm, - const MCAsmFixup &Fixup, - const MCValue Target, - const MCSection *BaseSection) { - // The effective fixup address is - // addr(atom(A)) + offset(A) - // - addr(atom(B)) - offset(B) - // - addr() + - // and the offsets are not relocatable, so the fixup is fully resolved when - // addr(atom(A)) - addr(atom(B)) - addr()) == 0. - // - // The simple (Darwin, except on x86_64) way of dealing with this was to - // assume that any reference to a temporary symbol *must* be a temporary - // symbol in the same atom, unless the sections differ. Therefore, any PCrel - // relocation to a temporary symbol (in the same section) is fully - // resolved. This also works in conjunction with absolutized .set, which - // requires the compiler to use .set to absolutize the differences between - // symbols which the compiler knows to be assembly time constants, so we don't - // need to worry about considering symbol differences fully resolved. - - // Non-relative fixups are only resolved if constant. - if (!BaseSection) - return Target.isAbsolute(); - - // Otherwise, relative fixups are only resolved if not a difference and the - // target is a temporary in the same section. 
- if (Target.isAbsolute() || Target.getSymB()) - return false; - - const MCSymbol *A = &Target.getSymA()->getSymbol(); - if (!A->isTemporary() || !A->isInSection() || - &A->getSection() != BaseSection) - return false; - - return true; -} - -static bool isScatteredFixupFullyResolved(const MCAssembler &Asm, - const MCAsmLayout &Layout, - const MCAsmFixup &Fixup, - const MCValue Target, - const MCSymbolData *BaseSymbol) { - // The effective fixup address is - // addr(atom(A)) + offset(A) - // - addr(atom(B)) - offset(B) - // - addr(BaseSymbol) + - // and the offsets are not relocatable, so the fixup is fully resolved when - // addr(atom(A)) - addr(atom(B)) - addr(BaseSymbol) == 0. - // - // Note that "false" is almost always conservatively correct (it means we emit - // a relocation which is unnecessary), except when it would force us to emit a - // relocation which the target cannot encode. - - const MCSymbolData *A_Base = 0, *B_Base = 0; - if (const MCSymbolRefExpr *A = Target.getSymA()) { - // Modified symbol references cannot be resolved. - if (A->getKind() != MCSymbolRefExpr::VK_None) - return false; - - A_Base = Asm.getAtom(Layout, &Asm.getSymbolData(A->getSymbol())); - if (!A_Base) - return false; - } - - if (const MCSymbolRefExpr *B = Target.getSymB()) { - // Modified symbol references cannot be resolved. - if (B->getKind() != MCSymbolRefExpr::VK_None) - return false; - - B_Base = Asm.getAtom(Layout, &Asm.getSymbolData(B->getSymbol())); - if (!B_Base) - return false; - } - - // If there is no base, A and B have to be the same atom for this fixup to be - // fully resolved. - if (!BaseSymbol) - return A_Base == B_Base; - - // Otherwise, B must be missing and A must be the base. - return !B_Base && BaseSymbol == A_Base; -} - -bool MCAssembler::isSymbolLinkerVisible(const MCSymbolData *SD) const { +bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const { // Non-temporary labels should always be visible to the linker. - if (!SD->getSymbol().isTemporary()) + if (!Symbol.isTemporary()) return true; // Absolute temporary labels are never visible. - if (!SD->getFragment()) + if (!Symbol.isInSection()) return false; // Otherwise, check if the section requires symbols even for temporary labels. - return getBackend().doesSectionRequireSymbols( - SD->getFragment()->getParent()->getSection()); + return getBackend().doesSectionRequireSymbols(Symbol.getSection()); } -const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout, - const MCSymbolData *SD) const { +const MCSymbolData *MCAssembler::getAtom(const MCSymbolData *SD) const { // Linker visible symbols define atoms. - if (isSymbolLinkerVisible(SD)) + if (isSymbolLinkerVisible(SD->getSymbol())) return SD; // Absolute and undefined symbols have no defining atom. @@ -301,189 +212,159 @@ const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout, } bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout, - const MCAsmFixup &Fixup, const MCFragment *DF, + const MCFixup &Fixup, const MCFragment *DF, MCValue &Target, uint64_t &Value) const { ++stats::EvaluateFixup; - if (!Fixup.Value->EvaluateAsRelocatable(Target, &Layout)) + if (!Fixup.getValue()->EvaluateAsRelocatable(Target, Layout)) report_fatal_error("expected relocatable expression"); - // FIXME: How do non-scattered symbols work in ELF? I presume the linker - // doesn't support small relocations, but then under what criteria does the - // assembler allow symbol differences? 
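// ---- Editor's note: illustrative, not part of this patch. -----------------
// Resolvability is now delegated to the object writer
// (IsSymbolRefDifferenceFullyResolvedImpl), replacing the removed
// isScatteredFixup* helpers above. The value computation below keeps the same
// shape; with made-up numbers, a PC-relative fixup at section offset 0x20
// against a symbol A at offset 0x100 with addend 4 yields
//   Value = C + offset(A) - offset(fixup) = 4 + 0x100 - 0x20 = 0xE4,
// and an offset(B) term, when SymB is present, is subtracted the same way.
// ---------------------------------------------------------------------------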
+ bool IsPCRel = Backend.getFixupKindInfo( + Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel; + + bool IsResolved; + if (IsPCRel) { + if (Target.getSymB()) { + IsResolved = false; + } else if (!Target.getSymA()) { + IsResolved = false; + } else { + const MCSymbolRefExpr *A = Target.getSymA(); + const MCSymbol &SA = A->getSymbol(); + if (A->getKind() != MCSymbolRefExpr::VK_None || + SA.AliasedSymbol().isUndefined()) { + IsResolved = false; + } else { + const MCSymbolData &DataA = getSymbolData(SA); + IsResolved = + getWriter().IsSymbolRefDifferenceFullyResolvedImpl(*this, DataA, + *DF, false, true); + } + } + } else { + IsResolved = Target.isAbsolute(); + } Value = Target.getConstant(); - bool IsPCRel = - Emitter.getFixupKindInfo(Fixup.Kind).Flags & MCFixupKindInfo::FKF_IsPCRel; - bool IsResolved = true; + bool IsThumb = false; if (const MCSymbolRefExpr *A = Target.getSymA()) { - if (A->getSymbol().isDefined()) - Value += Layout.getSymbolAddress(&getSymbolData(A->getSymbol())); - else - IsResolved = false; + const MCSymbol &Sym = A->getSymbol().AliasedSymbol(); + if (Sym.isDefined()) + Value += Layout.getSymbolOffset(&getSymbolData(Sym)); + if (isThumbFunc(&Sym)) + IsThumb = true; } if (const MCSymbolRefExpr *B = Target.getSymB()) { - if (B->getSymbol().isDefined()) - Value -= Layout.getSymbolAddress(&getSymbolData(B->getSymbol())); - else - IsResolved = false; + const MCSymbol &Sym = B->getSymbol().AliasedSymbol(); + if (Sym.isDefined()) + Value -= Layout.getSymbolOffset(&getSymbolData(Sym)); } - // If we are using scattered symbols, determine whether this value is actually - // resolved; scattering may cause atoms to move. - if (IsResolved && getBackend().hasScatteredSymbols()) { - if (getBackend().hasReliableSymbolDifference()) { - // If this is a PCrel relocation, find the base atom (identified by its - // symbol) that the fixup value is relative to. - const MCSymbolData *BaseSymbol = 0; - if (IsPCRel) { - BaseSymbol = DF->getAtom(); - if (!BaseSymbol) - IsResolved = false; - } - if (IsResolved) - IsResolved = isScatteredFixupFullyResolved(*this, Layout, Fixup, Target, - BaseSymbol); - } else { - const MCSection *BaseSection = 0; - if (IsPCRel) - BaseSection = &DF->getParent()->getSection(); + bool ShouldAlignPC = Backend.getFixupKindInfo(Fixup.getKind()).Flags & + MCFixupKindInfo::FKF_IsAlignedDownTo32Bits; + assert((ShouldAlignPC ? IsPCRel : true) && + "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!"); - IsResolved = isScatteredFixupFullyResolvedSimple(*this, Fixup, Target, - BaseSection); - } + if (IsPCRel) { + uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset(); + + // A number of ARM fixups in Thumb mode require that the effective PC + // address be determined as the 32-bit aligned version of the actual offset. + if (ShouldAlignPC) Offset &= ~0x3; + Value -= Offset; } - if (IsPCRel) - Value -= Layout.getFragmentAddress(DF) + Fixup.Offset; + // ARM fixups based from a thumb function address need to have the low + // bit set. The actual value is always at least 16-bit aligned, so the + // low bit is normally clear and available for use as an ISA flag for + // interworking. + if (IsThumb) + Value |= 1; return IsResolved; } -void MCAssembler::LayoutFragment(MCAsmLayout &Layout, MCFragment &F) { - uint64_t StartAddress = Layout.getSectionAddress(F.getParent()); - - // Get the fragment start address. 
- uint64_t Address = StartAddress; - MCSectionData::iterator it = &F; - if (MCFragment *Prev = F.getPrevNode()) - Address = (StartAddress + Layout.getFragmentOffset(Prev) + - Layout.getFragmentEffectiveSize(Prev)); - - ++stats::FragmentLayouts; - - uint64_t FragmentOffset = Address - StartAddress; - Layout.setFragmentOffset(&F, FragmentOffset); - - // Evaluate fragment size. - uint64_t EffectiveSize = 0; +uint64_t MCAssembler::ComputeFragmentSize(const MCAsmLayout &Layout, + const MCFragment &F) const { switch (F.getKind()) { - case MCFragment::FT_Align: { - MCAlignFragment &AF = cast(F); - - EffectiveSize = OffsetToAlignment(Address, AF.getAlignment()); - if (EffectiveSize > AF.getMaxBytesToEmit()) - EffectiveSize = 0; - break; - } - case MCFragment::FT_Data: - EffectiveSize = cast(F).getContents().size(); - break; + return cast(F).getContents().size(); + case MCFragment::FT_Fill: + return cast(F).getSize(); + case MCFragment::FT_Inst: + return cast(F).getInstSize(); - case MCFragment::FT_Fill: { - MCFillFragment &FF = cast(F); - EffectiveSize = FF.getSize(); - break; - } + case MCFragment::FT_LEB: + return cast(F).getContents().size(); - case MCFragment::FT_Inst: - EffectiveSize = cast(F).getInstSize(); - break; + case MCFragment::FT_Align: { + const MCAlignFragment &AF = cast(F); + unsigned Offset = Layout.getFragmentOffset(&AF); + unsigned Size = OffsetToAlignment(Offset, AF.getAlignment()); + if (Size > AF.getMaxBytesToEmit()) + return 0; + return Size; + } case MCFragment::FT_Org: { MCOrgFragment &OF = cast(F); - int64_t TargetLocation; - if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, &Layout)) + if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, Layout)) report_fatal_error("expected assembly-time absolute expression"); // FIXME: We need a way to communicate this error. - int64_t Offset = TargetLocation - FragmentOffset; - if (Offset < 0) + uint64_t FragmentOffset = Layout.getFragmentOffset(&OF); + int64_t Size = TargetLocation - FragmentOffset; + if (Size < 0 || Size >= 0x40000000) report_fatal_error("invalid .org offset '" + Twine(TargetLocation) + - "' (at offset '" + Twine(FragmentOffset) + "'"); - - EffectiveSize = Offset; - break; + "' (at offset '" + Twine(FragmentOffset) + "')"); + return Size; } - case MCFragment::FT_ZeroFill: { - EffectiveSize = cast(F).getSize(); - break; - } + case MCFragment::FT_Dwarf: + return cast(F).getContents().size(); + case MCFragment::FT_DwarfFrame: + return cast(F).getContents().size(); } - Layout.setFragmentEffectiveSize(&F, EffectiveSize); + assert(0 && "invalid fragment kind"); + return 0; } -void MCAssembler::LayoutSection(MCAsmLayout &Layout, - unsigned SectionOrderIndex) { - MCSectionData &SD = *Layout.getSectionOrder()[SectionOrderIndex]; - bool IsVirtual = getBackend().isVirtualSection(SD.getSection()); +void MCAsmLayout::LayoutFragment(MCFragment *F) { + MCFragment *Prev = F->getPrevNode(); - ++stats::SectionLayouts; + // We should never try to recompute something which is up-to-date. + assert(!isFragmentUpToDate(F) && "Attempt to recompute up-to-date fragment!"); + // We should never try to compute the fragment layout if it's predecessor + // isn't up-to-date. + assert((!Prev || isFragmentUpToDate(Prev)) && + "Attempt to compute fragment before it's predecessor!"); - // Get the section start address. 
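// ---- Editor's note: illustrative sketch, not part of this patch. ----------
// With per-section address layout gone, a fragment's offset is just a running
// sum of the sizes of the fragments before it in the same section, computed
// on demand by LayoutFragment() above. A minimal standalone model (FragModel
// is a made-up type, not LLVM API):

#include <cstdint>
#include <vector>

struct FragModel { uint64_t Size; uint64_t Offset; };

static void layoutSectionSketch(std::vector<FragModel> &Frags) {
  uint64_t Offset = 0;
  for (size_t i = 0, e = Frags.size(); i != e; ++i) {
    Frags[i].Offset = Offset;   // first fragment starts at offset 0
    Offset += Frags[i].Size;    // next fragment starts where this one ends
  }
}
// ---------------------------------------------------------------------------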
- uint64_t StartAddress = 0; - if (SectionOrderIndex) { - MCSectionData *Prev = Layout.getSectionOrder()[SectionOrderIndex - 1]; - StartAddress = Layout.getSectionAddress(Prev) + Layout.getSectionSize(Prev); - } - - // Align this section if necessary by adding padding bytes to the previous - // section. It is safe to adjust this out-of-band, because no symbol or - // fragment is allowed to point past the end of the section at any time. - if (uint64_t Pad = OffsetToAlignment(StartAddress, SD.getAlignment())) { - // Unless this section is virtual (where we are allowed to adjust the offset - // freely), the padding goes in the previous section. - if (!IsVirtual) { - assert(SectionOrderIndex && "Invalid initial section address!"); - MCSectionData *Prev = Layout.getSectionOrder()[SectionOrderIndex - 1]; - Layout.setSectionFileSize(Prev, Layout.getSectionFileSize(Prev) + Pad); - } - - StartAddress += Pad; - } - - // Set the aligned section address. - Layout.setSectionAddress(&SD, StartAddress); + ++stats::FragmentLayouts; - for (MCSectionData::iterator it = SD.begin(), ie = SD.end(); it != ie; ++it) - LayoutFragment(Layout, *it); + // Compute fragment offset and size. + uint64_t Offset = 0; + if (Prev) + Offset += Prev->Offset + getAssembler().ComputeFragmentSize(*this, *Prev); - // Set the section sizes. - uint64_t Size = 0; - if (!SD.getFragmentList().empty()) { - MCFragment *F = &SD.getFragmentList().back(); - Size = Layout.getFragmentOffset(F) + Layout.getFragmentEffectiveSize(F); - } - Layout.setSectionSize(&SD, Size); - Layout.setSectionFileSize(&SD, IsVirtual ? 0 : Size); + F->Offset = Offset; + LastValidFragment[F->getParent()] = F; } /// WriteFragmentData - Write the \arg F data to the output file. static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, - const MCFragment &F, MCObjectWriter *OW) { + const MCFragment &F) { + MCObjectWriter *OW = &Asm.getWriter(); uint64_t Start = OW->getStream().tell(); (void) Start; ++stats::EmittedFragments; // FIXME: Embed in fragments instead? - uint64_t FragmentSize = Layout.getFragmentEffectiveSize(&F); + uint64_t FragmentSize = Asm.ComputeFragmentSize(Layout, F); switch (F.getKind()) { case MCFragment::FT_Align: { MCAlignFragment &AF = cast(F); @@ -504,7 +385,7 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, // the Count bytes. Then if that did not fill any bytes or there are any // bytes left to fill use the the Value and ValueSize to fill the rest. // If we are aligning with nops, ask that target to emit the right data. 
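// ---- Editor's note: illustrative sketch, not part of this patch. ----------
// The amount of padding written here was fixed during layout: for an align
// fragment, ComputeFragmentSize() above returns
// OffsetToAlignment(Offset, Alignment). A rough standalone equivalent of that
// helper (the real one lives in MathExtras.h):

#include <cstdint>

static uint64_t offsetToAlignmentSketch(uint64_t Offset, uint64_t Align) {
  return (Align - (Offset % Align)) % Align;   // 0 when already aligned
}

// e.g. offsetToAlignmentSketch(0x13, 16) == 13, i.e. 13 bytes of padding.
// ---------------------------------------------------------------------------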
- if (AF.getEmitNops()) { + if (AF.hasEmitNops()) { if (!Asm.getBackend().WriteNopData(Count, OW)) report_fatal_error("unable to write nop sequence of " + Twine(Count) + " bytes"); @@ -534,6 +415,9 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, case MCFragment::FT_Fill: { MCFillFragment &FF = cast(F); + + assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!"); + for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) { switch (FF.getValueSize()) { default: @@ -547,9 +431,17 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, break; } - case MCFragment::FT_Inst: - llvm_unreachable("unexpected inst fragment after lowering"); + case MCFragment::FT_Inst: { + MCInstFragment &IF = cast(F); + OW->WriteBytes(StringRef(IF.getCode().begin(), IF.getCode().size())); + break; + } + + case MCFragment::FT_LEB: { + MCLEBFragment &LF = cast(F); + OW->WriteBytes(LF.getContents().str()); break; + } case MCFragment::FT_Org: { MCOrgFragment &OF = cast(F); @@ -560,8 +452,14 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, break; } - case MCFragment::FT_ZeroFill: { - assert(0 && "Invalid zero fill fragment in concrete section!"); + case MCFragment::FT_Dwarf: { + const MCDwarfLineAddrFragment &OF = cast(F); + OW->WriteBytes(OF.getContents().str()); + break; + } + case MCFragment::FT_DwarfFrame: { + const MCDwarfCallFrameFragment &CF = cast(F); + OW->WriteBytes(CF.getContents().str()); break; } } @@ -570,50 +468,104 @@ static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, } void MCAssembler::WriteSectionData(const MCSectionData *SD, - const MCAsmLayout &Layout, - MCObjectWriter *OW) const { - uint64_t SectionSize = Layout.getSectionSize(SD); - uint64_t SectionFileSize = Layout.getSectionFileSize(SD); - + const MCAsmLayout &Layout) const { // Ignore virtual sections. - if (getBackend().isVirtualSection(SD->getSection())) { - assert(SectionFileSize == 0 && "Invalid size for section!"); + if (SD->getSection().isVirtualSection()) { + assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!"); + + // Check that contents are only things legal inside a virtual section. + for (MCSectionData::const_iterator it = SD->begin(), + ie = SD->end(); it != ie; ++it) { + switch (it->getKind()) { + default: + assert(0 && "Invalid fragment in virtual section!"); + case MCFragment::FT_Data: { + // Check that we aren't trying to write a non-zero contents (or fixups) + // into a virtual section. This is to support clients which use standard + // directives to fill the contents of virtual sections. + MCDataFragment &DF = cast(*it); + assert(DF.fixup_begin() == DF.fixup_end() && + "Cannot have fixups in virtual section!"); + for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i) + assert(DF.getContents()[i] == 0 && + "Invalid data value for virtual section!"); + break; + } + case MCFragment::FT_Align: + // Check that we aren't trying to write a non-zero value into a virtual + // section. 
+ assert((!cast(it)->getValueSize() || + !cast(it)->getValue()) && + "Invalid align in virtual section!"); + break; + case MCFragment::FT_Fill: + assert(!cast(it)->getValueSize() && + "Invalid fill in virtual section!"); + break; + } + } + return; } - uint64_t Start = OW->getStream().tell(); + uint64_t Start = getWriter().getStream().tell(); (void) Start; for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end(); it != ie; ++it) - WriteFragmentData(*this, Layout, *it, OW); - - // Add section padding. - assert(SectionFileSize >= SectionSize && "Invalid section sizes!"); - OW->WriteZeros(SectionFileSize - SectionSize); + WriteFragmentData(*this, Layout, *it); - assert(OW->getStream().tell() - Start == SectionFileSize); + assert(getWriter().getStream().tell() - Start == + Layout.getSectionAddressSize(SD)); } + +uint64_t MCAssembler::HandleFixup(const MCAsmLayout &Layout, + MCFragment &F, + const MCFixup &Fixup) { + // Evaluate the fixup. + MCValue Target; + uint64_t FixedValue; + if (!EvaluateFixup(Layout, Fixup, &F, Target, FixedValue)) { + // The fixup was unresolved, we need a relocation. Inform the object + // writer of the relocation, and give it an opportunity to adjust the + // fixup value if need be. + getWriter().RecordRelocation(*this, Layout, &F, Fixup, Target, FixedValue); + } + return FixedValue; + } + void MCAssembler::Finish() { DEBUG_WITH_TYPE("mc-dump", { llvm::errs() << "assembler backend - pre-layout\n--\n"; dump(); }); - // Assign section and fragment ordinals, all subsequent backend code is - // responsible for updating these in place. + // Create the layout object. + MCAsmLayout Layout(*this); + + // Create dummy fragments and assign section ordinals. unsigned SectionIndex = 0; - unsigned FragmentIndex = 0; for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) { + // Create dummy fragments to eliminate any empty sections, this simplifies + // layout. + if (it->getFragmentList().empty()) + new MCDataFragment(it); + it->setOrdinal(SectionIndex++); + } - for (MCSectionData::iterator it2 = it->begin(), - ie2 = it->end(); it2 != ie2; ++it2) - it2->setOrdinal(FragmentIndex++); + // Assign layout order indices to sections and fragments. + for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) { + MCSectionData *SD = Layout.getSectionOrder()[i]; + SD->setLayoutOrder(i); + + unsigned FragmentIndex = 0; + for (MCSectionData::iterator it2 = SD->begin(), + ie2 = SD->end(); it2 != ie2; ++it2) + it2->setLayoutOrder(FragmentIndex++); } // Layout until everything fits. - MCAsmLayout Layout(*this); while (LayoutOnce(Layout)) continue; @@ -629,49 +581,45 @@ void MCAssembler::Finish() { dump(); }); uint64_t StartOffset = OS.tell(); - llvm::OwningPtr Writer(getBackend().createObjectWriter(OS)); - if (!Writer) - report_fatal_error("unable to create object writer!"); // Allow the object writer a chance to perform post-layout binding (for // example, to set the index fields in the symbol data). - Writer->ExecutePostLayoutBinding(*this); + getWriter().ExecutePostLayoutBinding(*this, Layout); // Evaluate and apply the fixups, generating relocation entries as necessary. for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) { for (MCSectionData::iterator it2 = it->begin(), ie2 = it->end(); it2 != ie2; ++it2) { MCDataFragment *DF = dyn_cast(it2); - if (!DF) - continue; - - for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(), - ie3 = DF->fixup_end(); it3 != ie3; ++it3) { - MCAsmFixup &Fixup = *it3; - - // Evaluate the fixup. 
- MCValue Target; - uint64_t FixedValue; - if (!EvaluateFixup(Layout, Fixup, DF, Target, FixedValue)) { - // The fixup was unresolved, we need a relocation. Inform the object - // writer of the relocation, and give it an opportunity to adjust the - // fixup value if need be. - Writer->RecordRelocation(*this, Layout, DF, Fixup, Target,FixedValue); + if (DF) { + for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(), + ie3 = DF->fixup_end(); it3 != ie3; ++it3) { + MCFixup &Fixup = *it3; + uint64_t FixedValue = HandleFixup(Layout, *DF, Fixup); + getBackend().ApplyFixup(Fixup, DF->getContents().data(), + DF->getContents().size(), FixedValue); + } + } + MCInstFragment *IF = dyn_cast(it2); + if (IF) { + for (MCInstFragment::fixup_iterator it3 = IF->fixup_begin(), + ie3 = IF->fixup_end(); it3 != ie3; ++it3) { + MCFixup &Fixup = *it3; + uint64_t FixedValue = HandleFixup(Layout, *IF, Fixup); + getBackend().ApplyFixup(Fixup, IF->getCode().data(), + IF->getCode().size(), FixedValue); } - - getBackend().ApplyFixup(Fixup, *DF, FixedValue); } } } // Write the object file. - Writer->WriteObject(*this, Layout); - OS.flush(); + getWriter().WriteObject(*this, Layout); stats::ObjectBytes += OS.tell() - StartOffset; } -bool MCAssembler::FixupNeedsRelaxation(const MCAsmFixup &Fixup, +bool MCAssembler::FixupNeedsRelaxation(const MCFixup &Fixup, const MCFragment *DF, const MCAsmLayout &Layout) const { if (getRelaxAll()) @@ -694,7 +642,7 @@ bool MCAssembler::FragmentNeedsRelaxation(const MCInstFragment *IF, // If this inst doesn't ever need relaxation, ignore it. This occurs when we // are intentionally pushing out inst fragments, or because we relaxed a // previous instruction to one that doesn't need relaxation. - if (!getBackend().MayNeedRelaxation(IF->getInst(), IF->getFixups())) + if (!getBackend().MayNeedRelaxation(IF->getInst())) return false; for (MCInstFragment::const_fixup_iterator it = IF->fixup_begin(), @@ -705,109 +653,144 @@ bool MCAssembler::FragmentNeedsRelaxation(const MCInstFragment *IF, return false; } -bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) { - ++stats::RelaxationSteps; +bool MCAssembler::RelaxInstruction(MCAsmLayout &Layout, + MCInstFragment &IF) { + if (!FragmentNeedsRelaxation(&IF, Layout)) + return false; + + ++stats::RelaxedInstructions; + + // FIXME-PERF: We could immediately lower out instructions if we can tell + // they are fully resolved, to avoid retesting on later passes. + + // Relax the fragment. - // Layout the sections in order. - for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) - LayoutSection(Layout, i); + MCInst Relaxed; + getBackend().RelaxInstruction(IF.getInst(), Relaxed); + // Encode the new instruction. + // + // FIXME-PERF: If it matters, we could let the target do this. It can + // probably do so more efficiently in many cases. + SmallVector Fixups; + SmallString<256> Code; + raw_svector_ostream VecOS(Code); + getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups); + VecOS.flush(); + + // Update the instruction fragment. + IF.setInst(Relaxed); + IF.getCode() = Code; + IF.getFixups().clear(); + // FIXME: Eliminate copy. 
+ for (unsigned i = 0, e = Fixups.size(); i != e; ++i) + IF.getFixups().push_back(Fixups[i]); + + return true; +} + +bool MCAssembler::RelaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) { + int64_t Value = 0; + uint64_t OldSize = LF.getContents().size(); + LF.getValue().EvaluateAsAbsolute(Value, Layout); + SmallString<8> &Data = LF.getContents(); + Data.clear(); + raw_svector_ostream OSE(Data); + if (LF.isSigned()) + MCObjectWriter::EncodeSLEB128(Value, OSE); + else + MCObjectWriter::EncodeULEB128(Value, OSE); + OSE.flush(); + return OldSize != LF.getContents().size(); +} + +bool MCAssembler::RelaxDwarfLineAddr(MCAsmLayout &Layout, + MCDwarfLineAddrFragment &DF) { + int64_t AddrDelta = 0; + uint64_t OldSize = DF.getContents().size(); + bool IsAbs = DF.getAddrDelta().EvaluateAsAbsolute(AddrDelta, Layout); + (void)IsAbs; + assert(IsAbs); + int64_t LineDelta; + LineDelta = DF.getLineDelta(); + SmallString<8> &Data = DF.getContents(); + Data.clear(); + raw_svector_ostream OSE(Data); + MCDwarfLineAddr::Encode(LineDelta, AddrDelta, OSE); + OSE.flush(); + return OldSize != Data.size(); +} + +bool MCAssembler::RelaxDwarfCallFrameFragment(MCAsmLayout &Layout, + MCDwarfCallFrameFragment &DF) { + int64_t AddrDelta = 0; + uint64_t OldSize = DF.getContents().size(); + bool IsAbs = DF.getAddrDelta().EvaluateAsAbsolute(AddrDelta, Layout); + (void)IsAbs; + assert(IsAbs); + SmallString<8> &Data = DF.getContents(); + Data.clear(); + raw_svector_ostream OSE(Data); + MCDwarfFrameEmitter::EncodeAdvanceLoc(AddrDelta, OSE); + OSE.flush(); + return OldSize != Data.size(); +} + +bool MCAssembler::LayoutSectionOnce(MCAsmLayout &Layout, + MCSectionData &SD) { + MCFragment *FirstInvalidFragment = NULL; // Scan for fragments that need relaxation. + for (MCSectionData::iterator it2 = SD.begin(), + ie2 = SD.end(); it2 != ie2; ++it2) { + // Check if this is an fragment that needs relaxation. + bool relaxedFrag = false; + switch(it2->getKind()) { + default: + break; + case MCFragment::FT_Inst: + relaxedFrag = RelaxInstruction(Layout, *cast(it2)); + break; + case MCFragment::FT_Dwarf: + relaxedFrag = RelaxDwarfLineAddr(Layout, + *cast(it2)); + break; + case MCFragment::FT_DwarfFrame: + relaxedFrag = + RelaxDwarfCallFrameFragment(Layout, + *cast(it2)); + break; + case MCFragment::FT_LEB: + relaxedFrag = RelaxLEB(Layout, *cast(it2)); + break; + } + // Update the layout, and remember that we relaxed. + if (relaxedFrag && !FirstInvalidFragment) + FirstInvalidFragment = it2; + } + if (FirstInvalidFragment) { + Layout.Invalidate(FirstInvalidFragment); + return true; + } + return false; +} + +bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) { + ++stats::RelaxationSteps; + bool WasRelaxed = false; for (iterator it = begin(), ie = end(); it != ie; ++it) { MCSectionData &SD = *it; - - for (MCSectionData::iterator it2 = SD.begin(), - ie2 = SD.end(); it2 != ie2; ++it2) { - // Check if this is an instruction fragment that needs relaxation. - MCInstFragment *IF = dyn_cast(it2); - if (!IF || !FragmentNeedsRelaxation(IF, Layout)) - continue; - - ++stats::RelaxedInstructions; - - // FIXME-PERF: We could immediately lower out instructions if we can tell - // they are fully resolved, to avoid retesting on later passes. - - // Relax the fragment. - - MCInst Relaxed; - getBackend().RelaxInstruction(IF, Relaxed); - - // Encode the new instruction. - // - // FIXME-PERF: If it matters, we could let the target do this. It can - // probably do so more efficiently in many cases. 
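// ---- Editor's note: illustrative sketch, not part of this patch. ----------
// RelaxLEB() (added earlier in this patch) can change a fragment's size
// because LEB128 is a variable-length encoding, which is why LEB fragments
// now take part in the relaxation loop above. A minimal sketch of the
// unsigned scheme that MCObjectWriter::EncodeULEB128 emits:

#include <cstdint>
#include <vector>

static std::vector<uint8_t> encodeULEB128Sketch(uint64_t Value) {
  std::vector<uint8_t> Out;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    if (Value != 0)
      Byte |= 0x80;              // more bytes follow
    Out.push_back(Byte);
  } while (Value != 0);
  return Out;                    // e.g. 624485 -> 0xE5 0x8E 0x26
}
// ---------------------------------------------------------------------------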
- SmallVector Fixups; - SmallString<256> Code; - raw_svector_ostream VecOS(Code); - getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups); - VecOS.flush(); - - // Update the instruction fragment. - int SlideAmount = Code.size() - IF->getInstSize(); - IF->setInst(Relaxed); - IF->getCode() = Code; - IF->getFixups().clear(); - for (unsigned i = 0, e = Fixups.size(); i != e; ++i) { - MCFixup &F = Fixups[i]; - IF->getFixups().push_back(MCAsmFixup(F.getOffset(), *F.getValue(), - F.getKind())); - } - - // Update the layout, and remember that we relaxed. If we are relaxing - // everything, we can skip this step since nothing will depend on updating - // the values. - if (!getRelaxAll()) - Layout.UpdateForSlide(IF, SlideAmount); + while(LayoutSectionOnce(Layout, SD)) WasRelaxed = true; - } } return WasRelaxed; } void MCAssembler::FinishLayout(MCAsmLayout &Layout) { - // Lower out any instruction fragments, to simplify the fixup application and - // output. - // - // FIXME-PERF: We don't have to do this, but the assumption is that it is - // cheap (we will mostly end up eliminating fragments and appending on to data - // fragments), so the extra complexity downstream isn't worth it. Evaluate - // this assumption. - for (iterator it = begin(), ie = end(); it != ie; ++it) { - MCSectionData &SD = *it; - - for (MCSectionData::iterator it2 = SD.begin(), - ie2 = SD.end(); it2 != ie2; ++it2) { - MCInstFragment *IF = dyn_cast(it2); - if (!IF) - continue; - - // Create a new data fragment for the instruction. - // - // FIXME-PERF: Reuse previous data fragment if possible. - MCDataFragment *DF = new MCDataFragment(); - SD.getFragmentList().insert(it2, DF); - - // Update the data fragments layout data. - // - // FIXME: Add MCAsmLayout utility for this. - DF->setParent(IF->getParent()); - DF->setAtom(IF->getAtom()); - DF->setOrdinal(IF->getOrdinal()); - Layout.setFragmentOffset(DF, Layout.getFragmentOffset(IF)); - Layout.setFragmentEffectiveSize(DF, Layout.getFragmentEffectiveSize(IF)); - - // Copy in the data and the fixups. - DF->getContents().append(IF->getCode().begin(), IF->getCode().end()); - for (unsigned i = 0, e = IF->getFixups().size(); i != e; ++i) - DF->getFixups().push_back(IF->getFixups()[i]); - - // Delete the instruction fragment and update the iterator. - SD.getFragmentList().erase(IF); - it2 = DF; - } + // The layout is done. Mark every fragment as valid. 
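// ---- Editor's note: not part of this patch. --------------------------------
// The loop below looks like a no-op, but getFragmentOffset() calls
// EnsureValid(), so querying the offset of each section's final fragment has
// the side effect of laying out any fragment that is still stale. That is all
// FinishLayout() needs to do now that instruction fragments are written out
// directly instead of being lowered into data fragments.
// ---------------------------------------------------------------------------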
+ for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) { + Layout.getFragmentOffset(&*Layout.getSectionOrder()[i]->rbegin()); } } @@ -815,9 +798,10 @@ void MCAssembler::FinishLayout(MCAsmLayout &Layout) { namespace llvm { -raw_ostream &operator<<(raw_ostream &OS, const MCAsmFixup &AF) { - OS << ""; +raw_ostream &operator<<(raw_ostream &OS, const MCFixup &AF) { + OS << ""; return OS; } @@ -826,95 +810,102 @@ raw_ostream &operator<<(raw_ostream &OS, const MCAsmFixup &AF) { void MCFragment::dump() { raw_ostream &OS = llvm::errs(); - OS << ""; -} - -void MCAlignFragment::dump() { - raw_ostream &OS = llvm::errs(); - - OS << "MCFragment::dump(); - OS << "\n "; - OS << " Alignment:" << getAlignment() - << " Value:" << getValue() << " ValueSize:" << getValueSize() - << " MaxBytesToEmit:" << getMaxBytesToEmit() << ">"; -} + OS << "<"; + switch (getKind()) { + case MCFragment::FT_Align: OS << "MCAlignFragment"; break; + case MCFragment::FT_Data: OS << "MCDataFragment"; break; + case MCFragment::FT_Fill: OS << "MCFillFragment"; break; + case MCFragment::FT_Inst: OS << "MCInstFragment"; break; + case MCFragment::FT_Org: OS << "MCOrgFragment"; break; + case MCFragment::FT_Dwarf: OS << "MCDwarfFragment"; break; + case MCFragment::FT_DwarfFrame: OS << "MCDwarfCallFrameFragment"; break; + case MCFragment::FT_LEB: OS << "MCLEBFragment"; break; + } -void MCDataFragment::dump() { - raw_ostream &OS = llvm::errs(); + OS << ""; - OS << "MCFragment::dump(); - OS << "\n "; - OS << " Contents:["; - for (unsigned i = 0, e = getContents().size(); i != e; ++i) { - if (i) OS << ","; - OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF); - } - OS << "] (" << getContents().size() << " bytes)"; - - if (!getFixups().empty()) { - OS << ",\n "; - OS << " Fixups:["; - for (fixup_iterator it = fixup_begin(), ie = fixup_end(); it != ie; ++it) { - if (it != fixup_begin()) OS << ",\n "; - OS << *it; + switch (getKind()) { + case MCFragment::FT_Align: { + const MCAlignFragment *AF = cast(this); + if (AF->hasEmitNops()) + OS << " (emit nops)"; + OS << "\n "; + OS << " Alignment:" << AF->getAlignment() + << " Value:" << AF->getValue() << " ValueSize:" << AF->getValueSize() + << " MaxBytesToEmit:" << AF->getMaxBytesToEmit() << ">"; + break; + } + case MCFragment::FT_Data: { + const MCDataFragment *DF = cast(this); + OS << "\n "; + OS << " Contents:["; + const SmallVectorImpl &Contents = DF->getContents(); + for (unsigned i = 0, e = Contents.size(); i != e; ++i) { + if (i) OS << ","; + OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF); } - OS << "]"; + OS << "] (" << Contents.size() << " bytes)"; + + if (!DF->getFixups().empty()) { + OS << ",\n "; + OS << " Fixups:["; + for (MCDataFragment::const_fixup_iterator it = DF->fixup_begin(), + ie = DF->fixup_end(); it != ie; ++it) { + if (it != DF->fixup_begin()) OS << ",\n "; + OS << *it; + } + OS << "]"; + } + break; + } + case MCFragment::FT_Fill: { + const MCFillFragment *FF = cast(this); + OS << " Value:" << FF->getValue() << " ValueSize:" << FF->getValueSize() + << " Size:" << FF->getSize(); + break; + } + case MCFragment::FT_Inst: { + const MCInstFragment *IF = cast(this); + OS << "\n "; + OS << " Inst:"; + IF->getInst().dump_pretty(OS); + break; + } + case MCFragment::FT_Org: { + const MCOrgFragment *OF = cast(this); + OS << "\n "; + OS << " Offset:" << OF->getOffset() << " Value:" << OF->getValue(); + break; + } + case MCFragment::FT_Dwarf: { + const MCDwarfLineAddrFragment *OF = cast(this); + OS << "\n "; + OS << " 
AddrDelta:" << OF->getAddrDelta() + << " LineDelta:" << OF->getLineDelta(); + break; + } + case MCFragment::FT_DwarfFrame: { + const MCDwarfCallFrameFragment *CF = cast(this); + OS << "\n "; + OS << " AddrDelta:" << CF->getAddrDelta(); + break; + } + case MCFragment::FT_LEB: { + const MCLEBFragment *LF = cast(this); + OS << "\n "; + OS << " Value:" << LF->getValue() << " Signed:" << LF->isSigned(); + break; + } } - - OS << ">"; -} - -void MCFillFragment::dump() { - raw_ostream &OS = llvm::errs(); - - OS << "MCFragment::dump(); - OS << "\n "; - OS << " Value:" << getValue() << " ValueSize:" << getValueSize() - << " Size:" << getSize() << ">"; -} - -void MCInstFragment::dump() { - raw_ostream &OS = llvm::errs(); - - OS << "MCFragment::dump(); - OS << "\n "; - OS << " Inst:"; - getInst().dump_pretty(OS); OS << ">"; } -void MCOrgFragment::dump() { - raw_ostream &OS = llvm::errs(); - - OS << "MCFragment::dump(); - OS << "\n "; - OS << " Offset:" << getOffset() << " Value:" << getValue() << ">"; -} - -void MCZeroFillFragment::dump() { - raw_ostream &OS = llvm::errs(); - - OS << "MCFragment::dump(); - OS << "\n "; - OS << " Size:" << getSize() << ">"; -} - void MCSectionData::dump() { raw_ostream &OS = llvm::errs(); OS << "dump();