//===-- X86Operand.h - Parsed X86 machine instruction --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
10 #ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
11 #define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
13 #include "X86AsmParserCommon.h"
14 #include "llvm/MC/MCExpr.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/ADT/STLExtras.h"
/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction.
22 struct X86Operand : public MCParsedAsmOperand {
30 SMLoc StartLoc, EndLoc;
66 X86Operand(KindTy K, SMLoc Start, SMLoc End)
67 : Kind(K), StartLoc(Start), EndLoc(End) {}
69 StringRef getSymName() override { return SymName; }
70 void *getOpDecl() override { return OpDecl; }
72 /// getStartLoc - Get the location of the first token of this operand.
73 SMLoc getStartLoc() const override { return StartLoc; }
74 /// getEndLoc - Get the location of the last token of this operand.
75 SMLoc getEndLoc() const override { return EndLoc; }
76 /// getLocRange - Get the range between the first and last token of this
78 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
79 /// getOffsetOfLoc - Get the location of the offset operator.
80 SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }
82 void print(raw_ostream &OS) const override {}
84 StringRef getToken() const {
85 assert(Kind == Token && "Invalid access!");
86 return StringRef(Tok.Data, Tok.Length);
88 void setTokenValue(StringRef Value) {
89 assert(Kind == Token && "Invalid access!");
90 Tok.Data = Value.data();
91 Tok.Length = Value.size();
94 unsigned getReg() const override {
95 assert(Kind == Register && "Invalid access!");
99 const MCExpr *getImm() const {
100 assert(Kind == Immediate && "Invalid access!");
104 const MCExpr *getMemDisp() const {
105 assert(Kind == Memory && "Invalid access!");
108 unsigned getMemSegReg() const {
109 assert(Kind == Memory && "Invalid access!");
112 unsigned getMemBaseReg() const {
113 assert(Kind == Memory && "Invalid access!");
116 unsigned getMemIndexReg() const {
117 assert(Kind == Memory && "Invalid access!");
120 unsigned getMemScale() const {
121 assert(Kind == Memory && "Invalid access!");
124 unsigned getMemModeSize() const {
125 assert(Kind == Memory && "Invalid access!");
129 bool isToken() const override {return Kind == Token; }
131 bool isImm() const override { return Kind == Immediate; }
133 bool isImmSExti16i8() const {
137 // If this isn't a constant expr, just assume it fits and let relaxation
139 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
143 // Otherwise, check the value is in a range that makes sense for this
145 return isImmSExti16i8Value(CE->getValue());
147 bool isImmSExti32i8() const {
151 // If this isn't a constant expr, just assume it fits and let relaxation
153 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
157 // Otherwise, check the value is in a range that makes sense for this
159 return isImmSExti32i8Value(CE->getValue());
161 bool isImmSExti64i8() const {
165 // If this isn't a constant expr, just assume it fits and let relaxation
167 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
171 // Otherwise, check the value is in a range that makes sense for this
173 return isImmSExti64i8Value(CE->getValue());
175 bool isImmSExti64i32() const {
179 // If this isn't a constant expr, just assume it fits and let relaxation
181 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
185 // Otherwise, check the value is in a range that makes sense for this
187 return isImmSExti64i32Value(CE->getValue());
190 bool isOffsetOf() const override {
191 return OffsetOfLoc.getPointer();
194 bool needAddressOf() const override {
198 bool isMem() const override { return Kind == Memory; }
199 bool isMemUnsized() const {
200 return Kind == Memory && Mem.Size == 0;
202 bool isMem8() const {
203 return Kind == Memory && (!Mem.Size || Mem.Size == 8);
205 bool isMem16() const {
206 return Kind == Memory && (!Mem.Size || Mem.Size == 16);
208 bool isMem32() const {
209 return Kind == Memory && (!Mem.Size || Mem.Size == 32);
211 bool isMem64() const {
212 return Kind == Memory && (!Mem.Size || Mem.Size == 64);
214 bool isMem80() const {
215 return Kind == Memory && (!Mem.Size || Mem.Size == 80);
217 bool isMem128() const {
218 return Kind == Memory && (!Mem.Size || Mem.Size == 128);
220 bool isMem256() const {
221 return Kind == Memory && (!Mem.Size || Mem.Size == 256);
223 bool isMem512() const {
224 return Kind == Memory && (!Mem.Size || Mem.Size == 512);
227 bool isMemVX32() const {
228 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
229 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
231 bool isMemVY32() const {
232 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
233 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
235 bool isMemVX64() const {
236 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
237 getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15;
239 bool isMemVY64() const {
240 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
241 getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15;
243 bool isMemVZ32() const {
244 return Kind == Memory && (!Mem.Size || Mem.Size == 32) &&
245 getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
247 bool isMemVZ64() const {
248 return Kind == Memory && (!Mem.Size || Mem.Size == 64) &&
249 getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31;
252 bool isAbsMem() const {
253 return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
254 !getMemIndexReg() && getMemScale() == 1;
257 bool isSrcIdx() const {
258 return !getMemIndexReg() && getMemScale() == 1 &&
259 (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
260 getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
261 cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
263 bool isSrcIdx8() const {
264 return isMem8() && isSrcIdx();
266 bool isSrcIdx16() const {
267 return isMem16() && isSrcIdx();
269 bool isSrcIdx32() const {
270 return isMem32() && isSrcIdx();
272 bool isSrcIdx64() const {
273 return isMem64() && isSrcIdx();
276 bool isDstIdx() const {
277 return !getMemIndexReg() && getMemScale() == 1 &&
278 (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
279 (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
280 getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
281 cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
283 bool isDstIdx8() const {
284 return isMem8() && isDstIdx();
286 bool isDstIdx16() const {
287 return isMem16() && isDstIdx();
289 bool isDstIdx32() const {
290 return isMem32() && isDstIdx();
292 bool isDstIdx64() const {
293 return isMem64() && isDstIdx();
296 bool isMemOffs() const {
297 return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
301 bool isMemOffs16_8() const {
302 return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
304 bool isMemOffs16_16() const {
305 return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
307 bool isMemOffs16_32() const {
308 return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
310 bool isMemOffs32_8() const {
311 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
313 bool isMemOffs32_16() const {
314 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
316 bool isMemOffs32_32() const {
317 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
319 bool isMemOffs32_64() const {
320 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
322 bool isMemOffs64_8() const {
323 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
325 bool isMemOffs64_16() const {
326 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
328 bool isMemOffs64_32() const {
329 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
331 bool isMemOffs64_64() const {
332 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
335 bool isReg() const override { return Kind == Register; }
337 bool isGR32orGR64() const {
338 return Kind == Register &&
339 (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
340 X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
343 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
344 // Add as immediates when possible.
345 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
346 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
348 Inst.addOperand(MCOperand::CreateExpr(Expr));
351 void addRegOperands(MCInst &Inst, unsigned N) const {
352 assert(N == 1 && "Invalid number of operands!");
353 Inst.addOperand(MCOperand::CreateReg(getReg()));
356 static unsigned getGR32FromGR64(unsigned RegNo) {
358 default: llvm_unreachable("Unexpected register");
359 case X86::RAX: return X86::EAX;
360 case X86::RCX: return X86::ECX;
361 case X86::RDX: return X86::EDX;
362 case X86::RBX: return X86::EBX;
363 case X86::RBP: return X86::EBP;
364 case X86::RSP: return X86::ESP;
365 case X86::RSI: return X86::ESI;
366 case X86::RDI: return X86::EDI;
367 case X86::R8: return X86::R8D;
368 case X86::R9: return X86::R9D;
369 case X86::R10: return X86::R10D;
370 case X86::R11: return X86::R11D;
371 case X86::R12: return X86::R12D;
372 case X86::R13: return X86::R13D;
373 case X86::R14: return X86::R14D;
374 case X86::R15: return X86::R15D;
375 case X86::RIP: return X86::EIP;
379 void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
380 assert(N == 1 && "Invalid number of operands!");
381 unsigned RegNo = getReg();
382 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
383 RegNo = getGR32FromGR64(RegNo);
384 Inst.addOperand(MCOperand::CreateReg(RegNo));
387 void addImmOperands(MCInst &Inst, unsigned N) const {
388 assert(N == 1 && "Invalid number of operands!");
389 addExpr(Inst, getImm());
392 void addMemOperands(MCInst &Inst, unsigned N) const {
393 assert((N == 5) && "Invalid number of operands!");
394 Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
395 Inst.addOperand(MCOperand::CreateImm(getMemScale()));
396 Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
397 addExpr(Inst, getMemDisp());
398 Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
401 void addAbsMemOperands(MCInst &Inst, unsigned N) const {
402 assert((N == 1) && "Invalid number of operands!");
403 // Add as immediates when possible.
404 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
405 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
407 Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
410 void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
411 assert((N == 2) && "Invalid number of operands!");
412 Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
413 Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
415 void addDstIdxOperands(MCInst &Inst, unsigned N) const {
416 assert((N == 1) && "Invalid number of operands!");
417 Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
420 void addMemOffsOperands(MCInst &Inst, unsigned N) const {
421 assert((N == 2) && "Invalid number of operands!");
422 // Add as immediates when possible.
423 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
424 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
426 Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
427 Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
430 static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
431 SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
432 auto Res = llvm::make_unique<X86Operand>(Token, Loc, EndLoc);
433 Res->Tok.Data = Str.data();
434 Res->Tok.Length = Str.size();
438 static std::unique_ptr<X86Operand>
439 CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
440 bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
441 StringRef SymName = StringRef(), void *OpDecl = nullptr) {
442 auto Res = llvm::make_unique<X86Operand>(Register, StartLoc, EndLoc);
443 Res->Reg.RegNo = RegNo;
444 Res->AddressOf = AddressOf;
445 Res->OffsetOfLoc = OffsetOfLoc;
446 Res->SymName = SymName;
447 Res->OpDecl = OpDecl;
451 static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
452 SMLoc StartLoc, SMLoc EndLoc) {
453 auto Res = llvm::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
458 /// Create an absolute memory operand.
459 static std::unique_ptr<X86Operand>
460 CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
461 unsigned Size = 0, StringRef SymName = StringRef(),
462 void *OpDecl = nullptr) {
463 auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
465 Res->Mem.Disp = Disp;
466 Res->Mem.BaseReg = 0;
467 Res->Mem.IndexReg = 0;
469 Res->Mem.Size = Size;
470 Res->Mem.ModeSize = ModeSize;
471 Res->SymName = SymName;
472 Res->OpDecl = OpDecl;
473 Res->AddressOf = false;
477 /// Create a generalized memory operand.
478 static std::unique_ptr<X86Operand>
479 CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
480 unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
481 SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
482 void *OpDecl = nullptr) {
483 // We should never just have a displacement, that should be parsed as an
484 // absolute memory operand.
485 assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
487 // The scale should always be one of {1,2,4,8}.
488 assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
490 auto Res = llvm::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
491 Res->Mem.SegReg = SegReg;
492 Res->Mem.Disp = Disp;
493 Res->Mem.BaseReg = BaseReg;
494 Res->Mem.IndexReg = IndexReg;
495 Res->Mem.Scale = Scale;
496 Res->Mem.Size = Size;
497 Res->Mem.ModeSize = ModeSize;
498 Res->SymName = SymName;
499 Res->OpDecl = OpDecl;
500 Res->AddressOf = false;
505 } // End of namespace llvm