//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:

  typedef uint64_t TargetPtrT;

  RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
                          RuntimeDyld::SymbolResolver &Resolver)
      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}

  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }
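
  // Stubs created by this class (see processGOTRelocation below) are single
  // 8-byte GOT entries holding the target's absolute address, hence a stub
  // size and stub alignment of 8.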

  /// Extract the addend encoded in the instruction / memory location.
  int64_t decodeAddend(const RelocationEntry &RE) const {
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    unsigned NumBytes = 1 << RE.Size;
    int64_t Addend = 0;
    // Verify that the relocation has the correct size and alignment.
    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
      else
        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Get the 26 bit addend encoded in the branch instruction and sign-extend
      // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
      // (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
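      // E.g. 0x14000004 ("b #16") decodes to an addend of +16, while
      // 0x17FFFFFF ("b #-4") sign-extends to -4.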
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12).
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
      Addend = SignExtend64(Addend, 33);
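      // E.g. 0x90000020 (adrp with immhi = 1, immlo = 0) decodes to an addend
      // of 0x4000, i.e. four 4096-byte pages.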
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      (void)p; // Silence unused-variable warnings in release builds.
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000)
            ImplicitShift = 4;
        }
      }
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
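      // E.g. for "ldr x0, [x1, #16]" the imm12 field holds 2 and bits 31:30
      // (0b11) give an implicit shift of 3, so the decoded addend is
      // 2 << 3 = 16.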
      break;
    }
    }
    return Addend;
  }

  /// Encode the addend into the instruction.
  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                    MachO::RelocationInfoType RelType, int64_t Addend) const {
    // Verify that the relocation has the correct alignment.
    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
      else
        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      // Verify that the relocation points to the expected branch instruction.
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Verify addend value.
      assert((Addend & 0x3) == 0 && "Branch target is not aligned.");
      assert(isInt<28>(Addend) && "Branch target is out of range.");

      // Encode the addend as 26 bit immediate in the branch instruction.
      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
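      // E.g. an addend of +16 becomes imm26 = 4, turning an unpatched
      // 0x14000000 into 0x14000004 ("b #16").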
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Check that the addend fits into 21 bits (+ 12 lower bits).
      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
      assert(isInt<33>(Addend) && "Invalid page reloc value.");

      // Encode the addend into the instruction.
      uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
      uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
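      // E.g. a page delta of 0x4000 (four pages) yields ImmLoValue = 0 and
      // ImmHiValue = 0x20, turning 0x90000000 ("adrp x0, #0") into 0x90000020.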
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
      (void)p; // Silence unused-variable warnings in release builds.
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction and verify alignment.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
        case 0:
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000) {
            ImplicitShift = 4;
            assert(((Addend & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          }
          break;
        case 1:
          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          break;
        case 2:
          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          break;
        case 3:
          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
          break;
        }
      }
      // Compensate for implicit shift.
      Addend >>= ImplicitShift;
      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");

      // Encode the addend into the instruction.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
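      // E.g. an addend of 16 on a 64-bit LDR (ImplicitShift = 3) stores
      // imm12 = 2, i.e. bits 21:10 of the instruction become 0x2.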
      break;
    }
    }
  }

  relocation_iterator
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       const ObjectFile &BaseObjT,
                       ObjSectionToIDMap &ObjSectionToID,
                       StubMap &Stubs) override {
    const MachOObjectFile &Obj =
        static_cast<const MachOObjectFile &>(BaseObjT);
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    assert(!Obj.isRelocationScattered(RelInfo) &&
           "Scattered relocations not supported!");

    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If found: (1) store the associated
    // addend, (2) consume the next relocation, and (3) use the stored addend to
    // override the addend decoded from the instruction.
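    // E.g. a reference to "_foo + 16" is typically emitted as an
    // ARM64_RELOC_ADDEND with value 16, immediately followed by the
    // ARM64_RELOC_BRANCH26 / ARM64_RELOC_PAGE21 / ARM64_RELOC_PAGEOFF12
    // relocation it applies to.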
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
      ExplicitAddend = SignExtend64(RawAddend, 24);
      // Consume the ARM64_RELOC_ADDEND and read the relocation it modifies.
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }

    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
    RE.Addend = decodeAddend(RE);
    RelocationValueRef Value(
        getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));

    assert((ExplicitAddend == 0 || RE.Addend == 0) &&
           "Relocation has ARM64_RELOC_ADDEND and embedded addend in the "
           "instruction.");
    if (ExplicitAddend) {
      RE.Addend = ExplicitAddend;
      Value.Offset = ExplicitAddend;
    }

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, RelI, 1 << RE.Size);

    RE.Addend = Value.Offset;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    MachO::RelocationInfoType RelType =
        static_cast<MachO::RelocationInfoType>(RE.RelType);

    switch (RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // Mask in the target value a byte at a time (we don't have an alignment
      // guarantee for the target address, so this is safest).
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");

      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
      // Compute the PC-relative branch displacement; encodeAddend verifies
      // that it is in range (isInt<28>).
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
      // Adjust for PC-relative relocation: the addend is the delta between
      // the target's page and this instruction's page.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal =
          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
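      // E.g. if Value + RE.Addend falls three pages above the page containing
      // FinalAddress, PCRelVal is 3 * 4096 = 0x3000.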
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not yet implemented!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
                       const SectionRef &Section) {}

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    int64_t Offset;
    if (i != Stubs.end())
      Offset = static_cast<int64_t>(i->second);
    else {
      // FIXME: There must be a better way to do this than to check and fix the
      // alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      // Round the stub offset up to the stub alignment using the standard
      // power-of-two align-up idiom: (x + a - 1) & -a.
      uintptr_t StubAddress =
          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
      Offset = static_cast<int64_t>(StubOffset);
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
                             RE.IsPCRel, RE.Size);
    addRelocationForSection(TargetRE, RE.SectionID);
  }
};
}

#undef DEBUG_TYPE

#endif