X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FExecutionEngine%2FRuntimeDyld%2FTargets%2FRuntimeDyldMachOAArch64.h;h=7bf764114bae8b4e7a61354e700252a101c91a7d;hb=e84d8c12d5157a926db15976389f703809c49aa5;hp=486cb56dc11c315044a284655f16e20f5cf1903f;hpb=6f2f090b0643b0a1500bdfa5bebf7ead83f8a82b;p=oota-llvm.git

diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
index 486cb56dc11..7bf764114ba 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -7,10 +7,11 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
-#define LLVM_RUNTIMEDYLDMACHOAARCH64_H
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
 
 #include "../RuntimeDyldMachO.h"
+#include "llvm/Support/Endian.h"
 
 #define DEBUG_TYPE "dyld"
 
@@ -19,23 +20,29 @@ namespace llvm {
 class RuntimeDyldMachOAArch64
     : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
 public:
-  RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
-      : RuntimeDyldMachOCRTPBase(MM) {}
+
+  typedef uint64_t TargetPtrT;
+
+  RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
+                          RuntimeDyld::SymbolResolver &Resolver)
+      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
 
   unsigned getMaxStubSize() override { return 8; }
 
   unsigned getStubAlignment() override { return 8; }
 
   /// Extract the addend encoded in the instruction / memory location.
-  int64_t decodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
-                       uint32_t RelType) const {
+  int64_t decodeAddend(const RelocationEntry &RE) const {
+    const SectionEntry &Section = Sections[RE.SectionID];
+    uint8_t *LocalAddress = Section.Address + RE.Offset;
+    unsigned NumBytes = 1 << RE.Size;
     int64_t Addend = 0;
     // Verify that the relocation has the correct size and alignment.
-    switch (RelType) {
+    switch (RE.RelType) {
     default:
       llvm_unreachable("Unsupported relocation type!");
     case MachO::ARM64_RELOC_UNSIGNED:
-      assert((NumBytes >= 4 && NumBytes <= 8) && "Invalid relocation size.");
+      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
       break;
     case MachO::ARM64_RELOC_BRANCH26:
     case MachO::ARM64_RELOC_PAGE21:
@@ -48,16 +55,19 @@ public:
       break;
     }
 
-    switch (RelType) {
+    switch (RE.RelType) {
     default:
       llvm_unreachable("Unsupported relocation type!");
     case MachO::ARM64_RELOC_UNSIGNED:
-      // This could be an unaligned memory location - use memcpy.
-      memcpy(&Addend, LocalAddress, NumBytes);
+      // This could be an unaligned memory location.
+      if (NumBytes == 4)
+        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
+      else
+        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
       break;
     case MachO::ARM64_RELOC_BRANCH26: {
       // Verify that the relocation points to the expected branch instruction.
-      uint32_t *p = (uint32_t *)LocalAddress;
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
       assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
 
       // Get the 26 bit addend encoded in the branch instruction and sign-extend
@@ -70,7 +80,7 @@ public:
     case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
     case MachO::ARM64_RELOC_PAGE21: {
       // Verify that the relocation points to the expected adrp instruction.
-      uint32_t *p = (uint32_t *)LocalAddress;
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
       assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
 
       // Get the 21 bit addend encoded in the adrp instruction and sign-extend
@@ -83,14 +93,15 @@ public:
     case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
       // Verify that the relocation points to one of the expected load / store
       // instructions.
-      uint32_t *p = (uint32_t *)LocalAddress;
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+      (void)p;
       assert((*p & 0x3B000000) == 0x39000000 &&
              "Only expected load / store instructions.");
     } // fall-through
     case MachO::ARM64_RELOC_PAGEOFF12: {
       // Verify that the relocation points to one of the expected load / store
       // or add / sub instructions.
-      uint32_t *p = (uint32_t *)LocalAddress;
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
       assert((((*p & 0x3B000000) == 0x39000000) ||
               ((*p & 0x11C00000) == 0x11000000) ) &&
              "Expected load / store or add/sub instruction.");
@@ -118,12 +129,126 @@ public:
     return Addend;
   }
 
+  /// Extract the addend encoded in the instruction.
+  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+                    MachO::RelocationInfoType RelType, int64_t Addend) const {
+    // Verify that the relocation has the correct alignment.
+    switch (RelType) {
+    default:
+      llvm_unreachable("Unsupported relocation type!");
+    case MachO::ARM64_RELOC_UNSIGNED:
+      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
+      break;
+    case MachO::ARM64_RELOC_BRANCH26:
+    case MachO::ARM64_RELOC_PAGE21:
+    case MachO::ARM64_RELOC_PAGEOFF12:
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+      assert(NumBytes == 4 && "Invalid relocation size.");
+      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+             "Instruction address is not aligned to 4 bytes.");
+      break;
+    }
+
+    switch (RelType) {
+    default:
+      llvm_unreachable("Unsupported relocation type!");
+    case MachO::ARM64_RELOC_UNSIGNED:
+      // This could be an unaligned memory location.
+      if (NumBytes == 4)
+        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
+      else
+        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
+      break;
+    case MachO::ARM64_RELOC_BRANCH26: {
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+      // Verify that the relocation points to the expected branch instruction.
+      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
+
+      // Verify addend value.
+      assert((Addend & 0x3) == 0 && "Branch target is not aligned");
+      assert(isInt<28>(Addend) && "Branch target is out of range.");
+
+      // Encode the addend as 26 bit immediate in the branch instruction.
+      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
+      break;
+    }
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+    case MachO::ARM64_RELOC_PAGE21: {
+      // Verify that the relocation points to the expected adrp instruction.
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+      // Check that the addend fits into 21 bits (+ 12 lower bits).
+      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
+      assert(isInt<33>(Addend) && "Invalid page reloc value.");
+
+      // Encode the addend into the instruction.
+      uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
+      uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
+      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+      break;
+    }
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+      // Verify that the relocation points to one of the expected load / store
+      // instructions.
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+      assert((*p & 0x3B000000) == 0x39000000 &&
+             "Only expected load / store instructions.");
+      (void)p;
+    } // fall-through
+    case MachO::ARM64_RELOC_PAGEOFF12: {
+      // Verify that the relocation points to one of the expected load / store
+      // or add / sub instructions.
+      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+      assert((((*p & 0x3B000000) == 0x39000000) ||
+              ((*p & 0x11C00000) == 0x11000000) ) &&
+             "Expected load / store or add/sub instruction.");
+
+      // Check which instruction we are decoding to obtain the implicit shift
+      // factor of the instruction and verify alignment.
+      int ImplicitShift = 0;
+      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+        // For load / store instructions the size is encoded in bits 31:30.
+        ImplicitShift = ((*p >> 30) & 0x3);
+        switch (ImplicitShift) {
+        case 0:
+          // Check if this a vector op to get the correct shift value.
+          if ((*p & 0x04800000) == 0x04800000) {
+            ImplicitShift = 4;
+            assert(((Addend & 0xF) == 0) &&
+                   "128-bit LDR/STR not 16-byte aligned.");
+          }
+          break;
+        case 1:
+          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+          break;
+        case 2:
+          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+          break;
+        case 3:
+          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+          break;
+        }
+      }
+      // Compensate for implicit shift.
+      Addend >>= ImplicitShift;
+      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
+
+      // Encode the addend into the instruction.
+      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
+      break;
+    }
+    }
+  }
+
   relocation_iterator
   processRelocationRef(unsigned SectionID, relocation_iterator RelI,
-                       ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
-                       const SymbolTableMap &Symbols, StubMap &Stubs) override {
+                       const ObjectFile &BaseObjT,
+                       ObjSectionToIDMap &ObjSectionToID,
+                       StubMap &Stubs) override {
     const MachOObjectFile &Obj =
-        static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+        static_cast<const MachOObjectFile &>(BaseObjT);
     MachO::any_relocation_info RelInfo =
         Obj.getRelocation(RelI->getRawDataRefImpl());
 
@@ -145,22 +270,23 @@ public:
       RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
     }
 
-    RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+    RE.Addend = decodeAddend(RE);
     RelocationValueRef Value(
-        getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+        getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));
 
     assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
       "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
     if (ExplicitAddend) {
       RE.Addend = ExplicitAddend;
-      Value.Addend = ExplicitAddend;
+      Value.Offset = ExplicitAddend;
     }
 
     bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
     if (!IsExtern && RE.IsPCRel)
-      makeValueAddendPCRel(Value, ObjImg, RelI);
+      makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
 
-    RE.Addend = Value.Addend;
+    RE.Addend = Value.Offset;
 
     if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
         RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
@@ -175,13 +301,15 @@ public:
     return ++RelI;
   }
 
-  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
     DEBUG(dumpRelocationToResolve(RE, Value));
 
     const SectionEntry &Section = Sections[RE.SectionID];
     uint8_t *LocalAddress = Section.Address + RE.Offset;
+    MachO::RelocationInfoType RelType =
+      static_cast<MachO::RelocationInfoType>(RE.RelType);
 
-    switch (RE.RelType) {
+    switch (RelType) {
     default:
       llvm_unreachable("Invalid relocation type!");
     case MachO::ARM64_RELOC_UNSIGNED: {
@@ -191,117 +319,49 @@ public:
       if (RE.Size < 2)
         llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
 
-      writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
       break;
     }
     case MachO::ARM64_RELOC_BRANCH26: {
       assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
-      // Mask the value into the target address. We know instructions are
-      // 32-bit aligned, so we can do it all at once.
-      uint32_t *p = (uint32_t *)LocalAddress;
-      // Check if the addend is encoded in the instruction.
-      uint32_t EncodedAddend = *p & 0x03FFFFFF;
-      if (EncodedAddend != 0) {
-        if (RE.Addend == 0)
-          llvm_unreachable("branch26 instruction has embedded addend.");
-        else
-          llvm_unreachable("branch26 instruction has embedded addend and"
-                           "ARM64_RELOC_ADDEND.");
-      }
       // Check if branch is in range.
       uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
-      uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
-      assert(isInt<26>(PCRelVal) && "Branch target out of range!");
-      // Insert the value into the instruction.
-      *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
+      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
+      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
       break;
     }
     case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
     case MachO::ARM64_RELOC_PAGE21: {
       assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
-      // Mask the value into the target address. We know instructions are
-      // 32-bit aligned, so we can do it all at once.
-      uint32_t *p = (uint32_t *)LocalAddress;
-      // Check if the addend is encoded in the instruction.
-      uint32_t EncodedAddend =
-          ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
-      if (EncodedAddend != 0) {
-        if (RE.Addend == 0)
-          llvm_unreachable("adrp instruction has embedded addend.");
-        else
-          llvm_unreachable("adrp instruction has embedded addend and"
-                           "ARM64_RELOC_ADDEND.");
-      }
       // Adjust for PC-relative relocation and offset.
       uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
-      uint64_t PCRelVal =
-          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
-      // Check that the value fits into 21 bits (+ 12 lower bits).
-      assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
-      // Insert the value into the instruction.
-      uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
-      uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
-      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+      int64_t PCRelVal =
+          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
       break;
     }
     case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
     case MachO::ARM64_RELOC_PAGEOFF12: {
       assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
-      // Mask the value into the target address. We know instructions are
-      // 32-bit aligned, so we can do it all at once.
-      uint32_t *p = (uint32_t *)LocalAddress;
-      // Check if the addend is encoded in the instruction.
-      uint32_t EncodedAddend = *p & 0x003FFC00;
-      if (EncodedAddend != 0) {
-        if (RE.Addend == 0)
-          llvm_unreachable("adrp instruction has embedded addend.");
-        else
-          llvm_unreachable("adrp instruction has embedded addend and"
-                           "ARM64_RELOC_ADDEND.");
-      }
       // Add the offset from the symbol.
       Value += RE.Addend;
       // Mask out the page address and only use the lower 12 bits.
       Value &= 0xFFF;
-      // Check which instruction we are updating to obtain the implicit shift
-      // factor from LDR/STR instructions.
-      if (*p & 0x08000000) {
-        uint32_t ImplicitShift = ((*p >> 30) & 0x3);
-        switch (ImplicitShift) {
-        case 0:
-          // Check if this a vector op.
-          if ((*p & 0x04800000) == 0x04800000) {
-            ImplicitShift = 4;
-            assert(((Value & 0xF) == 0) &&
-                   "128-bit LDR/STR not 16-byte aligned.");
-          }
-          break;
-        case 1:
-          assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
-        case 2:
-          assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
-        case 3:
-          assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
-        }
-        // Compensate for implicit shift.
-        Value >>= ImplicitShift;
-      }
-      // Insert the value into the instruction.
-      *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
+      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
       break;
     }
     case MachO::ARM64_RELOC_SUBTRACTOR:
     case MachO::ARM64_RELOC_POINTER_TO_GOT:
     case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
     case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
-      llvm_unreachable("Relocation type not implemented yet!");
+      llvm_unreachable("Relocation type not yet implemented!");
     case MachO::ARM64_RELOC_ADDEND:
       llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
                        "processRelocationRef!");
     }
   }
 
-  void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+  void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
                        const SectionRef &Section) {}
 
 private:
@@ -310,9 +370,9 @@ private:
     assert(RE.Size == 2);
     SectionEntry &Section = Sections[RE.SectionID];
     StubMap::const_iterator i = Stubs.find(Value);
-    uint8_t *Addr;
+    int64_t Offset;
     if (i != Stubs.end())
-      Addr = Section.Address + i->second;
+      Offset = static_cast<int64_t>(i->second);
     else {
       // FIXME: There must be a better way to do this then to check and fix the
       // alignment every time!!!
@@ -326,22 +386,22 @@ private:
       assert(((StubAddress % getStubAlignment()) == 0) &&
              "GOT entry not aligned");
       RelocationEntry GOTRE(RE.SectionID, StubOffset,
-                            MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
+                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                             /*IsPCRel=*/false, /*Size=*/3);
       if (Value.SymbolName)
         addRelocationForSymbol(GOTRE, Value.SymbolName);
       else
         addRelocationForSection(GOTRE, Value.SectionID);
       Section.StubOffset = StubOffset + getMaxStubSize();
-      Addr = (uint8_t *)StubAddress;
+      Offset = static_cast<int64_t>(StubOffset);
     }
-    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
+    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
                              RE.IsPCRel, RE.Size);
-    resolveRelocation(TargetRE, (uint64_t)Addr);
+    addRelocationForSection(TargetRE, RE.SectionID);
   }
 };
 }
 
 #undef DEBUG_TYPE
 
-#endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H
+#endif
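
Background sketch (not part of the patch): the ARM64_RELOC_PAGE21 / ARM64_RELOC_PAGEOFF12 pair that the new encodeAddend() centralizes splits a target address into a page-relative adrp immediate plus a low 12-bit offset. The helper names below are hypothetical; only the masks and shift amounts mirror the ones used in the patch, and the code is a minimal standalone illustration, not RuntimeDyld API.

// sketch.cpp -- illustrative only; names are not from the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Split a target address into the page delta consumed by ARM64_RELOC_PAGE21
// (adrp) and the low 12 bits consumed by ARM64_RELOC_PAGEOFF12 (add/ldr/str).
static void splitPageAddress(uint64_t Target, uint64_t PC,
                             int64_t &PageDelta, uint32_t &PageOff) {
  PageDelta = (int64_t)(Target & ~0xFFFULL) - (int64_t)(PC & ~0xFFFULL);
  PageOff = (uint32_t)(Target & 0xFFF);
}

// Encode a page delta into an adrp instruction word: immlo sits in bits
// 30:29, immhi in bits 23:5, exactly the masks used by the patch.
static uint32_t encodeAdrpImm(uint32_t Instr, int64_t PageDelta) {
  assert((PageDelta & 0xFFF) == 0 && "adrp operand must be page aligned");
  assert(PageDelta >= -(1LL << 32) && PageDelta < (1LL << 32) &&
         "page delta must fit in 21 (+12) bits");          // isInt<33>
  uint32_t ImmLo = ((uint64_t)PageDelta << 17) & 0x60000000; // bits 30:29
  uint32_t ImmHi = ((uint64_t)PageDelta >> 9) & 0x00FFFFE0;  // bits 23:5
  return (Instr & 0x9F00001F) | ImmHi | ImmLo;
}

int main() {
  uint64_t PC = 0x100000000ULL;     // address of the adrp instruction
  uint64_t Target = 0x100003F28ULL; // symbol address being materialized
  int64_t PageDelta;
  uint32_t PageOff;
  splitPageAddress(Target, PC, PageDelta, PageOff);
  uint32_t Adrp = encodeAdrpImm(0x90000000 /* adrp x0, 0 */, PageDelta);
  std::printf("page delta %+lld, low12 0x%03x, adrp word 0x%08x\n",
              (long long)PageDelta, PageOff, Adrp);
  return 0;
}

Because adrp materializes only the 4 KiB page of the target, the 21-bit page-scaled immediate has to hold just the page delta; the remaining 12 bits travel in the paired PAGEOFF12 instruction, shifted by the implicit LDR/STR scale that encodeAddend() computes.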
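A second standalone sketch, again with hypothetical helper names: the ARM64_RELOC_BRANCH26 paths in decodeAddend() and encodeAddend() round-trip a 26-bit, word-scaled branch immediate. The opcode check, mask, and range check below match the ones in the patch; everything else is illustrative.

// branch26.cpp -- illustrative only; names are not from the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Extract the byte offset encoded in an unconditional branch (B): imm26 is
// scaled by 4 and then sign-extended from 28 bits.
static int64_t decodeBranch26(uint32_t Instr) {
  assert((Instr & 0xFC000000) == 0x14000000 && "expected a B instruction");
  int64_t Off = (int64_t)(Instr & 0x03FFFFFF) << 2; // imm26 scaled to bytes
  if (Off & 0x08000000)                             // sign bit of 28-bit value
    Off |= ~(int64_t)0x0FFFFFFF;                    // sign-extend
  return Off;
}

// Encode a byte offset back into the instruction word.
static uint32_t encodeBranch26(uint32_t Instr, int64_t Off) {
  assert((Off & 0x3) == 0 && "branch target must be 4-byte aligned");
  assert(Off >= -(1LL << 27) && Off < (1LL << 27) && "target out of range");
  return (Instr & 0xFC000000) | ((uint32_t)(Off >> 2) & 0x03FFFFFF);
}

int main() {
  uint32_t B = encodeBranch26(0x14000000 /* b . */, -0x20); // branch back 32 bytes
  std::printf("encoded 0x%08x, decoded offset %lld\n", B,
              (long long)decodeBranch26(B));
  return 0;
}

The isInt<28> check on the byte offset in the patch corresponds to the ±128 MiB reach of a B instruction; the decode side recovers the same signed offset so that an addend already present in the instruction and an explicit ARM64_RELOC_ADDEND are never combined silently.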