//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}

  ~AArch64MCCodeEmitter() {}

  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, STI, MemSize);
  }
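
  // The unsigned 12-bit offset on load/store instructions is implicitly
  // scaled by the access size, so selecting the right #:lo12:-style fixup
  // needs MemSize (in bytes) as well as the expression kind.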
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI,
                                  int MemSize) const;

  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups,
                                   const MCSubtargetInfo &STI) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups,
                                   const MCSubtargetInfo &STI) const;

  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  // Labels are handled mostly the same way: a symbol is needed, and
  // just gets some fixup attached.
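  // For instance, a conditional branch needs the 19-bit PC-relative fixup
  // while an unconditional branch needs the 26-bit one; fixupDesired carries
  // that choice into the shared implementation as a template parameter.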
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  unsigned getAddressWithFixup(const MCOperand &MO, unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little endian byte order.
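    // For example, 0x11223344 is emitted as the byte sequence 44 33 22 11.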
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

};

} // end anonymous namespace

unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
                                                   unsigned FixupKind,
                                                   SmallVectorImpl<MCFixup> &Fixups,
                                                   const MCSubtargetInfo &STI) const {
  if (!MO.isExpr()) {
    // This can occur for manually decoded or constructed MCInsts, but neither
    // the assembly-parser nor instruction selection will currently produce an
    // MCInst that's not a symbol reference.
    assert(MO.isImm() && "Unexpected address requested");
    return MO.getImm();
  }
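
  // Otherwise record a fixup against the expression and leave the encoded
  // field as zero; the assembler backend or linker supplies the real value.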
  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::Create(0, Expr, Kind));

  return 0;
}

unsigned AArch64MCCodeEmitter::
getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI,
                       int MemSize) const {
  const MCOperand &ImmOp = MI.getOperand(OpIdx);
  if (ImmOp.isImm())
    return ImmOp.getImm();

  assert(ImmOp.isExpr() && "Unexpected operand type");
  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
  unsigned FixupKind;
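
  // For example, "ldr x0, [x1, #:lo12:var]" is an 8-byte access, so its
  // offset field holds var's low 12 bits scaled by 8 and the fixup must be
  // the ldst64 variant.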

  switch (Expr->getKind()) {
  default: llvm_unreachable("Unexpected operand modifier");
  case AArch64MCExpr::VK_AARCH64_LO12: {
    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
                                             AArch64::fixup_a64_ldst16_lo12,
                                             AArch64::fixup_a64_ldst32_lo12,
                                             AArch64::fixup_a64_ldst64_lo12,
                                             AArch64::fixup_a64_ldst128_lo12 };
    assert(MemSize <= 16 && "Invalid fixup for operation");
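    // MemSize is in bytes, so Log2_32 maps 1/2/4/8/16 to indices 0-4.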
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12,
      AArch64::fixup_a64_ldst16_dtprel_lo12,
      AArch64::fixup_a64_ldst32_dtprel_lo12,
      AArch64::fixup_a64_ldst64_dtprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12,
      AArch64::fixup_a64_ldst16_tprel_lo12,
      AArch64::fixup_a64_ldst32_tprel_lo12,
      AArch64::fixup_a64_ldst64_tprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
      AArch64::fixup_a64_ldst64_tprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
    break;
  }

  return getAddressWithFixup(ImmOp, FixupKind, Fixups, STI);
}
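
// An ADD with a #:lo12: modifier is the usual second half of a small code
// model address computation:
//     adrp x0, var               // page of var
//     add  x0, x0, #:lo12:var    // low 12 bits of var
// The expression kind decides which relocation the fixup becomes.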
unsigned
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned FixupKind = 0;
  switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
  default: llvm_unreachable("Invalid expression modifier");
  case AArch64MCExpr::VK_AARCH64_LO12:
    FixupKind = AArch64::fixup_a64_add_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}
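
// ADRP materialises the 4KB page address of a symbol; the modifier selects
// whose page is wanted: the symbol itself, its GOT entry, or its TLS
// descriptor.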
unsigned
AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
    Modifier = Expr->getKind();

  unsigned FixupKind = 0;
  switch (Modifier) {
  case AArch64MCExpr::VK_AARCH64_None:
    FixupKind = AArch64::fixup_a64_adr_prel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOT:
    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC:
    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
    break;
  default:
    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}
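
// "lsl #s" is an alias of UBFM (for 32 bits: immr = (32 - s) mod 32 and
// imms = 31 - s), so the helpers below pack (imms << 6) | immr. For example,
// "lsl w0, w1, #3" becomes UBFM with immr = 29 and imms = 28.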
unsigned
AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                              SmallVectorImpl<MCFixup> &Fixups,
                                              const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
}

unsigned
AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                              SmallVectorImpl<MCFixup> &Fixups,
                                              const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
}
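
// AdvSIMD shift-by-immediate instructions bias the shift amount in their
// immh:immb field: a right shift by s on esize-bit elements is encoded as
// (2 * esize - s) and a left shift as (esize + s). The helpers below convert
// between the MC-level immediate operand and the bits the instruction
// definitions expect in their slice of that field.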
unsigned AArch64MCCodeEmitter::getShiftRightImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 64 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 8;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 16;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 32;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 64;
}

template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
                                      unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return getAddressWithFixup(MO, fixupDesired, Fixups, STI);

  assert(MO.isImm());
  return MO.getImm();
}
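
// Load-literal (LDR Rt, label) has a 19-bit scaled PC-relative immediate.
// The only modifier accepted here is :gottprel: (initial-exec TLS); anything
// else is assumed to be a plain label and gets the generic ld_prel fixup.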
unsigned
AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
                                             unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  unsigned FixupKind;

  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr());

  if (isa<AArch64MCExpr>(MO.getExpr())) {
    assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind()
           == AArch64MCExpr::VK_AARCH64_GOTTPREL
           && "Invalid symbol modifier for literal load");
    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
  } else
    FixupKind = AArch64::fixup_a64_ld_prel;

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}

unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                        const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  }

  llvm_unreachable("Unable to encode MCOperand!");
  return 0;
}
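
// MOVZ/MOVK sequences materialise a wide constant or address 16 bits at a
// time, with a modifier picking the slice, e.g.:
//     movz x0, #:abs_g1:var      // bits 16-31 of var
//     movk x0, #:abs_g0_nc:var   // bits 0-15, no overflow check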
unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups, STI);
}
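
// Load/store exclusive instructions without a status register (Rs) or second
// transfer register (Rt2) still reserve those fields, and they should read
// as all-ones: 0x001F0000 sets Rs (bits 16-20) and 0x00007C00 sets Rt2
// (bits 10-14) to 0b11111.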
template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned
AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                              const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
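  // For example, "movz x0, #:tprel_g0:var" may have to become a MOVN if the
  // thread-pointer offset turns out to be negative, and MOVZ differs from
  // MOVN only in bit 30 of the encoding.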
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
    return EncodedValue;
  }

  llvm_unreachable("Should have returned by now");
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(Ctx);
}

void AArch64MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
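    // A typical TLS descriptor sequence (per the AArch64 ELF ABI) that this
    // supports looks like:
    //     adrp x0, :tlsdesc:var
    //     ldr  x1, [x0, #:tlsdesc_lo12:var]
    //     add  x0, x0, #:tlsdesc_lo12:var
    //     .tlsdesccall var
    //     blr  x1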
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
    const MCExpr *Expr;
    Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
    return;
  }

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);

  EmitInstruction(Binary, OS);
}

#include "AArch64GenMCCodeEmitter.inc"