//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}

  ~AArch64MCCodeEmitter() {}
  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
  }

  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  int MemSize) const;
  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;

  // Labels are handled mostly the same way: a symbol is needed, and
  // just gets some fixup attached.
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getAddressWithFixup(const MCOperand &MO, unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups) const;
  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups) const;
  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;
  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
};

} // end anonymous namespace
unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
                                       unsigned FixupKind,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  if (!MO.isExpr()) {
    // This can occur for manually decoded or constructed MCInsts, but neither
    // the assembly-parser nor instruction selection will currently produce an
    // MCInst that's not a symbol reference.
    assert(MO.isImm() && "Unexpected address requested");
    return MO.getImm();
  }

  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::Create(0, Expr, Kind));

  // The fixup is resolved later (by the assembler backend, or as an ELF
  // relocation), so the encoded field is left as zero for now.
  return 0;
}
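
// The unsigned-offset LDR/STR forms encode an imm12 field that is implicitly
// scaled by the access size, so a :lo12:-style modifier must select the fixup
// matching MemSize (ldst8 .. ldst128). For example a 32-bit load of a local
// symbol, "ldr w0, [x0, #:lo12:var]", wants fixup_a64_ldst32_lo12, which
// patches bits [11:2] of the address into the imm12 field.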
unsigned AArch64MCCodeEmitter::
getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                       SmallVectorImpl<MCFixup> &Fixups,
                       int MemSize) const {
  const MCOperand &ImmOp = MI.getOperand(OpIdx);
  if (ImmOp.isImm())
    return ImmOp.getImm();

  assert(ImmOp.isExpr() && "Unexpected operand type");
  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
  unsigned FixupKind;

  switch (Expr->getKind()) {
  default: llvm_unreachable("Unexpected operand modifier");
  case AArch64MCExpr::VK_AARCH64_LO12: {
    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
                                             AArch64::fixup_a64_ldst16_lo12,
                                             AArch64::fixup_a64_ldst32_lo12,
                                             AArch64::fixup_a64_ldst64_lo12,
                                             AArch64::fixup_a64_ldst128_lo12 };
    assert(MemSize <= 16 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12,
      AArch64::fixup_a64_ldst16_dtprel_lo12,
      AArch64::fixup_a64_ldst32_dtprel_lo12,
      AArch64::fixup_a64_ldst64_dtprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12,
      AArch64::fixup_a64_ldst16_tprel_lo12,
      AArch64::fixup_a64_ldst32_tprel_lo12,
      AArch64::fixup_a64_ldst64_tprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
      AArch64::fixup_a64_ldst64_tprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
    break;
  }

  return getAddressWithFixup(ImmOp, FixupKind, Fixups);
}
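
// The 12-bit immediate of ADD/SUB is also used to complete an address
// computed by ADRP, as in the usual pair:
//   adrp x0, var
//   add  x0, x0, #:lo12:var
// so the modifier on the expression picks one of the add_*_lo12 fixups below
// (or add_*_hi12 for TLS offsets that don't fit in 12 bits).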
unsigned
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr() && "Unexpected operand type");
  unsigned FixupKind = 0;
  switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
  default: llvm_unreachable("Invalid expression modifier");
  case AArch64MCExpr::VK_AARCH64_LO12:
    FixupKind = AArch64::fixup_a64_add_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}
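
// ADRP materialises the address of the 4KB page containing its target, so
// every modifier here maps to a page-relative (_page) fixup; which one is
// chosen depends on whether the symbol is reached directly, through the GOT,
// or through one of the TLS access sequences.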
unsigned
AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr() && "Unexpected operand type");

  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
    Modifier = Expr->getKind();

  unsigned FixupKind = 0;
  switch (Modifier) {
  case AArch64MCExpr::VK_AARCH64_None:
    FixupKind = AArch64::fixup_a64_adr_prel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOT:
    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC:
    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
    break;
  default:
    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}
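
// "LSL Wd, Wn, #shift" is an alias of "UBFM Wd, Wn, #((32-shift) % 32),
// #(31-shift)", so these helpers turn the shift amount into the immr/imms
// pair: immr in bits [5:0] of the returned value and imms in bits [11:6].
// For example "lsl w0, w1, #3" yields immr=29, imms=28.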
unsigned
AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
}

unsigned
AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
}
unsigned AArch64MCCodeEmitter::getShiftRightImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 64 - MI.getOperand(Op).getImm();
}
template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
                                      unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return getAddressWithFixup(MO, fixupDesired, Fixups);

  assert(MO.isImm());
  return MO.getImm();
}
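
// Load-literal (LDR rt, label) has a 19-bit, word-scaled PC-relative offset,
// giving a +/-1MB range. The plain form takes fixup_a64_ld_prel; the only
// symbol modifier accepted here is :gottprel:, which loads an initial-exec
// TLS offset from a PC-relative GOT slot.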
unsigned
AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
                                       unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr() && "Unexpected operand type");

  unsigned FixupKind;
  if (isa<AArch64MCExpr>(MO.getExpr())) {
    assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
           == AArch64MCExpr::VK_AARCH64_GOTTPREL
           && "Invalid symbol modifier for literal load");
    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
  } else {
    FixupKind = AArch64::fixup_a64_ld_prel;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                        const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups) const {
  if (MO.isReg()) {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  }

  llvm_unreachable("Unable to encode MCOperand!");
  return 0;
}
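
// The move-wide operand packs the 16-bit immediate into the low bits of the
// returned value with the shift (hw) selector above it. A :abs_gN:,
// :dtprel_gN: or :tprel_gN: modifier selects which 16-bit group of the final
// value the fixup patches in; for example "movz x0, #:abs_g1:sym" takes bits
// [31:16] of the address of sym.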
unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }
  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
}
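
// In the load/store-exclusive encodings, the status register Rs occupies
// bits [20:16] and the second data register Rt2 bits [14:10]. Forms that
// don't use one of those fields are expected to encode it as 0b11111, which
// is what OR-ing in 0x001F0000 / 0x00007C00 below does.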
template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}
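
// Bits [30:29] of a move-wide instruction are the opc field: 00 is MOVN,
// 10 is MOVZ and 11 is MOVK. Clearing bit 30 therefore leaves a MOVN
// encoding; the fixup can set that bit again later if the final value calls
// for a MOVZ.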
unsigned
AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
    return EncodedValue;
  }

  llvm_unreachable("Should have returned by now");
}
unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor. Ra occupies bits
  // [14:10], so OR in 0x1f at that position.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}
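
// Factory hooked up to the target registry; in this backend it is normally
// registered from AArch64MCTargetDesc.cpp via
// TargetRegistry::RegisterMCCodeEmitter(TheAArch64Target, ...), though the
// exact registration site is outside this file.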
MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(Ctx);
}
void AArch64MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
    const MCExpr *Expr;
    Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
    return;
  }

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);

  EmitInstruction(Binary, OS);
}

#include "AArch64GenMCCodeEmitter.inc"