//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}

  ~AArch64MCCodeEmitter() {}

  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, STI, MemSize);
  }

  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI,
                                  int MemSize) const;

  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups,
                                   const MCSubtargetInfo &STI) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups,
                                   const MCSubtargetInfo &STI) const;

  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;
  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;
  unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  // Labels are handled mostly the same way: a symbol is needed, and
  // it just gets some fixup attached.
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  unsigned getAddressWithFixup(const MCOperand &MO,
                               unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
                                       unsigned FixupKind,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  if (!MO.isExpr()) {
    // This can occur for manually decoded or constructed MCInsts, but neither
    // the assembly-parser nor instruction selection will currently produce an
    // MCInst that's not a symbol reference.
    assert(MO.isImm() && "Unexpected address requested");
    return MO.getImm();
  }

  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::Create(0, Expr, Kind));

  return 0;
}

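// The UImm12 offset operand is either an already-scaled immediate or a
// symbolic expression carrying a :lo12:-style modifier. In the symbolic case
// the fixup recorded below depends on the access size (MemSize), because the
// LDST*_lo12 family of relocations implicitly shifts the low twelve bits of
// the address right by log2(MemSize).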
unsigned AArch64MCCodeEmitter::
getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                       SmallVectorImpl<MCFixup> &Fixups,
                       const MCSubtargetInfo &STI,
                       int MemSize) const {
  const MCOperand &ImmOp = MI.getOperand(OpIdx);
  if (ImmOp.isImm())
    return ImmOp.getImm();

  assert(ImmOp.isExpr() && "Unexpected operand type");
  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
  unsigned FixupKind = 0;

  switch (Expr->getKind()) {
  default: llvm_unreachable("Unexpected operand modifier");
  case AArch64MCExpr::VK_AARCH64_LO12: {
    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
                                             AArch64::fixup_a64_ldst16_lo12,
                                             AArch64::fixup_a64_ldst32_lo12,
                                             AArch64::fixup_a64_ldst64_lo12,
                                             AArch64::fixup_a64_ldst128_lo12 };
    assert(MemSize <= 16 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12,
      AArch64::fixup_a64_ldst16_dtprel_lo12,
      AArch64::fixup_a64_ldst32_dtprel_lo12,
      AArch64::fixup_a64_ldst64_dtprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12,
      AArch64::fixup_a64_ldst16_tprel_lo12,
      AArch64::fixup_a64_ldst32_tprel_lo12,
      AArch64::fixup_a64_ldst64_tprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
      AArch64::fixup_a64_ldst64_tprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
    break;
  }

  return getAddressWithFixup(ImmOp, FixupKind, Fixups, STI);
}

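// ADD/SUB immediate operands may likewise be symbolic (:lo12: and the TLS
// hi12/lo12 variants). A plain immediate is passed straight through;
// otherwise the matching add-family fixup is recorded and the field is
// filled in when the fixup is resolved.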
unsigned
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr() && "Unexpected operand type");

  unsigned FixupKind = 0;
  switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
  default: llvm_unreachable("Invalid expression modifier");
  case AArch64MCExpr::VK_AARCH64_LO12:
    FixupKind = AArch64::fixup_a64_add_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}

unsigned
AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
    Modifier = Expr->getKind();

  unsigned FixupKind = 0;
  switch (Modifier) {
  case AArch64MCExpr::VK_AARCH64_None:
    FixupKind = AArch64::fixup_a64_adr_prel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOT:
    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC:
    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
    break;
  default:
    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}

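// LSL #shift of a W-register is an alias of UBFM with immr = (32 - shift) % 32
// and imms = 31 - shift; the value returned below packs both fields of that
// aliased encoding (immr in the low six bits, imms shifted left by six). The
// 64-bit variant does the same with a modulus of 64.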
unsigned
AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                              SmallVectorImpl<MCFixup> &Fixups,
                                              const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
}

unsigned
AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                              SmallVectorImpl<MCFixup> &Fixups,
                                              const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
}

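// The SIMD shift-by-immediate encoders below translate the assembly-level
// shift amount into the raw field value the TableGen'erated encoder expects:
// right shifts are stored biased as (element width - shift), left shifts as
// (shift - element width).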
unsigned AArch64MCCodeEmitter::getShiftRightImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return 64 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 8;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 16;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 32;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  return MI.getOperand(Op).getImm() - 64;
}

template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
                                      unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return getAddressWithFixup(MO, fixupDesired, Fixups, STI);

  assert(MO.isImm());
  return MO.getImm();
}

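// Load-literal (LDR Rt, label) operands normally take the generic
// fixup_a64_ld_prel fixup; the only target modifier accepted here is
// :gottprel:, which selects the 19-bit PC-relative GOT-TPREL load fixup.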
unsigned
AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
                                             unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr());

  unsigned FixupKind;
  if (isa<AArch64MCExpr>(MO.getExpr())) {
    assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind() ==
               AArch64MCExpr::VK_AARCH64_GOTTPREL &&
           "Invalid symbol modifier for literal load");
    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
  } else {
    FixupKind = AArch64::fixup_a64_ld_prel;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups, STI);
}

unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                        const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  }

  llvm_unreachable("Unable to encode MCOperand!");
}

unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups, STI);
}

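// Post-encoder hook for the load/store-exclusive family: forms that do not
// use the status register (Rs) or the second transfer register (Rt2) must
// still encode those fields as all-ones (0b11111). The masks below cover
// bits [20:16] (Rs) and [14:10] (Rt2) of the instruction word.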
template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned
AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                              const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
    return EncodedValue;
  }

  llvm_unreachable("Should have returned by now");
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(Ctx);
}

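// Main encoding entry point. TLSDESCCALL is a zero-size pseudo that only
// plants an R_AARCH64_TLSDESC_CALL fixup; every real instruction is encoded
// by the TableGen'erated getBinaryCodeForInstr and written out as four
// little-endian bytes.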
void AArch64MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself, so it
    // doesn't go through the normal TableGenerated channels.
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
    const MCExpr *Expr;
    Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
    return;
  }

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);

  EmitInstruction(Binary, OS);
}

#include "AArch64GenMCCodeEmitter.inc"