1 //===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
11 #include "SIDefines.h"
12 #include "llvm/ADT/APFloat.h"
13 #include "llvm/ADT/SmallString.h"
14 #include "llvm/ADT/SmallVector.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/ADT/Twine.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCExpr.h"
20 #include "llvm/MC/MCInst.h"
21 #include "llvm/MC/MCInstrInfo.h"
22 #include "llvm/MC/MCParser/MCAsmLexer.h"
23 #include "llvm/MC/MCParser/MCAsmParser.h"
24 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
25 #include "llvm/MC/MCRegisterInfo.h"
26 #include "llvm/MC/MCStreamer.h"
27 #include "llvm/MC/MCSubtargetInfo.h"
28 #include "llvm/MC/MCTargetAsmParser.h"
29 #include "llvm/Support/SourceMgr.h"
30 #include "llvm/Support/TargetRegistry.h"
31 #include "llvm/Support/raw_ostream.h"
32 #include "llvm/Support/Debug.h"
38 struct OptionalOperand;
// Parsed-operand record for the AMDGPU assembler. One instance represents a
// token, immediate, register (with optional VOP3 input modifiers), or
// expression operand. NOTE(review): this view of the file is elided — several
// member bodies and the enclosing braces are not visible here.
40 class AMDGPUOperand : public MCParsedAsmOperand {
48 SMLoc StartLoc, EndLoc;
51 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
82 const MCRegisterInfo *TRI;
// Append this operand's immediate value to Inst.
93 void addImmOperands(MCInst &Inst, unsigned N) const {
94 Inst.addOperand(MCOperand::createImm(getImm()));
97 StringRef getToken() const {
98 return StringRef(Tok.Data, Tok.Length);
// Append this operand's register to Inst.
101 void addRegOperands(MCInst &Inst, unsigned N) const {
102 Inst.addOperand(MCOperand::createReg(getReg()));
// Dispatch to the register or immediate form, whichever this operand is.
105 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
107 addRegOperands(Inst, N);
109 addImmOperands(Inst, N);
// Emit the input-modifier immediate first, then the register.
// Reg.Modifiers == -1 is the "no modifiers parsed" sentinel and encodes as 0.
112 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
113 Inst.addOperand(MCOperand::createImm(
114 Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
115 addRegOperands(Inst, N);
// SOPP branch targets may be either a resolved immediate or a symbol expr.
118 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
120 addImmOperands(Inst, N);
123 Inst.addOperand(MCOperand::createExpr(Expr));
// True when the mnemonic token carries an explicit encoding suffix.
127 bool defaultTokenHasSuffix() const {
128 StringRef Token(Tok.Data, Tok.Length);
130 return Token.endswith("_e32") || Token.endswith("_e64");
133 bool isToken() const override {
134 return Kind == Token;
137 bool isImm() const override {
138 return Kind == Immediate;
// An "inline immediate" is encodable directly in the instruction:
// small integers in [-16, 64] or one of the listed FP constants.
141 bool isInlineImm() const {
142 float F = BitsToFloat(Imm.Val);
143 // TODO: Add 0.5pi for VI
144 return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
145 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
146 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
149 bool isDSOffset0() const {
151 return Imm.Type == ImmTyDSOffset0;
154 bool isDSOffset1() const {
156 return Imm.Type == ImmTyDSOffset1;
159 int64_t getImm() const {
163 enum ImmTy getImmTy() const {
168 bool isRegKind() const {
169 return Kind == Register;
// isReg() is deliberately stricter than isRegKind(): a register with
// modifiers attached no longer matches plain-register operand classes.
172 bool isReg() const override {
173 return Kind == Register && Reg.Modifiers == -1;
176 bool isRegWithInputMods() const {
177 return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
180 void setModifiers(unsigned Mods) {
182 Reg.Modifiers = Mods;
185 bool hasModifiers() const {
187 return Reg.Modifiers != -1;
190 unsigned getReg() const override {
194 bool isRegOrImm() const {
195 return isReg() || isImm();
198 bool isRegClass(unsigned RCID) const {
199 return Reg.TRI->getRegClass(RCID).contains(getReg());
// The following predicates back the tablegen'd operand classes
// (SCSrc/SSrc/VCSrc/VSrc, 32- and 64-bit variants).
202 bool isSCSrc32() const {
203 return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
206 bool isSSrc32() const {
207 return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
210 bool isSSrc64() const {
211 return isImm() || isInlineImm() ||
212 (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
215 bool isVCSrc32() const {
216 return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
219 bool isVCSrc64() const {
220 return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
223 bool isVSrc32() const {
224 return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
227 bool isVSrc64() const {
228 return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
231 bool isMem() const override {
235 bool isExpr() const {
236 return Kind == Expression;
239 bool isSoppBrTarget() const {
240 return isExpr() || isImm();
243 SMLoc getStartLoc() const override {
247 SMLoc getEndLoc() const override {
251 void print(raw_ostream &OS) const override { }
// Factory helpers — all operands are heap-allocated via make_unique.
253 static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
254 enum ImmTy Type = ImmTyNone,
255 bool IsFPImm = false) {
256 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
258 Op->Imm.IsFPImm = IsFPImm;
265 static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
266 bool HasExplicitEncodingSize = true) {
267 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
268 Res->Tok.Data = Str.data();
269 Res->Tok.Length = Str.size();
275 static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
277 const MCRegisterInfo *TRI,
279 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
280 Op->Reg.RegNo = RegNo;
// -1 marks "no modifiers yet"; see setModifiers()/hasModifiers().
282 Op->Reg.Modifiers = -1;
283 Op->Reg.IsForcedVOP3 = ForceVOP3;
289 static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
290 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
// Out-of-line predicates defined after the parser class (below).
297 bool isDSOffset() const;
298 bool isDSOffset01() const;
299 bool isSWaitCnt() const;
300 bool isMubufOffset() const;
// Target assembly parser for AMDGPU (SI+). Drives operand parsing and
// instruction matching via the tablegen-generated matcher.
303 class AMDGPUAsmParser : public MCTargetAsmParser {
304 MCSubtargetInfo &STI;
305 const MCInstrInfo &MII;
// 0 = no forced encoding; 32/64 = user wrote an _e32/_e64 mnemonic suffix.
308 unsigned ForcedEncodingSize;
309 /// @name Auto-generated Match Functions
312 #define GET_ASSEMBLER_HEADER
313 #include "AMDGPUGenAsmMatcher.inc"
318 AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
319 const MCInstrInfo &MII,
320 const MCTargetOptions &Options)
321 : MCTargetAsmParser(), STI(STI), MII(MII), Parser(_Parser),
322 ForcedEncodingSize(0){
324 if (STI.getFeatureBits().none()) {
325 // Set default features.
326 STI.ToggleFeature("SOUTHERN_ISLANDS");
329 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
332 unsigned getForcedEncodingSize() const {
333 return ForcedEncodingSize;
336 void setForcedEncodingSize(unsigned Size) {
337 ForcedEncodingSize = Size;
340 bool isForcedVOP3() const {
341 return ForcedEncodingSize == 64;
// MCTargetAsmParser overrides.
344 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
345 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
346 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
347 OperandVector &Operands, MCStreamer &Out,
349 bool MatchingInlineAsm) override;
350 bool ParseDirective(AsmToken DirectiveID) override;
351 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
352 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
353 SMLoc NameLoc, OperandVector &Operands) override;
// Generic "prefix:value" and named-bit operand helpers shared by the
// per-format parsers below.
355 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
356 int64_t Default = 0);
357 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
358 OperandVector &Operands,
359 enum AMDGPUOperand::ImmTy ImmTy =
360 AMDGPUOperand::ImmTyNone);
361 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
362 enum AMDGPUOperand::ImmTy ImmTy =
363 AMDGPUOperand::ImmTyNone);
364 OperandMatchResultTy parseOptionalOps(
365 const ArrayRef<OptionalOperand> &OptionalOps,
366 OperandVector &Operands);
// DS (LDS/GDS) instruction support.
369 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
370 void cvtDS(MCInst &Inst, const OperandVector &Operands);
371 OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
372 OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
373 OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);
// s_waitcnt and SOPP branch-target support.
375 bool parseCnt(int64_t &IntVal);
376 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
377 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
// FLAT instruction support.
379 OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
380 OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
381 void cvtFlat(MCInst &Inst, const OperandVector &Operands);
// MUBUF instruction support.
383 void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
384 OperandMatchResultTy parseOffset(OperandVector &Operands);
385 OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
386 OperandMatchResultTy parseGLC(OperandVector &Operands);
387 OperandMatchResultTy parseSLC(OperandVector &Operands);
388 OperandMatchResultTy parseTFE(OperandVector &Operands);
// MIMG instruction support.
390 OperandMatchResultTy parseDMask(OperandVector &Operands);
391 OperandMatchResultTy parseUNorm(OperandVector &Operands);
392 OperandMatchResultTy parseR128(OperandVector &Operands);
// VOP3 instruction support.
394 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
395 OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
// Table entry describing one optional operand: its name, the immediate type
// it produces, and an optional value-validation/conversion hook.
398 struct OptionalOperand {
400 AMDGPUOperand::ImmTy Type;
// Returns false to reject the parsed value; may rewrite it in place.
403 bool (*ConvertResult)(int64_t&);
// Map a register width (in 32-bit dwords) to the matching VGPR or SGPR
// register-class ID. Unlisted widths are impossible by construction.
408 static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
411 default: llvm_unreachable("Unknown register width");
412 case 1: return AMDGPU::VGPR_32RegClassID;
413 case 2: return AMDGPU::VReg_64RegClassID;
414 case 3: return AMDGPU::VReg_96RegClassID;
415 case 4: return AMDGPU::VReg_128RegClassID;
416 case 8: return AMDGPU::VReg_256RegClassID;
417 case 16: return AMDGPU::VReg_512RegClassID;
// SGPR classes — note there is no 96-bit scalar class.
422 default: llvm_unreachable("Unknown register width");
423 case 1: return AMDGPU::SGPR_32RegClassID;
424 case 2: return AMDGPU::SGPR_64RegClassID;
425 case 4: return AMDGPU::SReg_128RegClassID;
426 case 8: return AMDGPU::SReg_256RegClassID;
427 case 16: return AMDGPU::SReg_512RegClassID;
// Resolve a special-register name (exec, vcc, m0, flat_scr and their
// lo/hi halves) to its register number; non-matches fall through to the
// StringSwitch default (not visible in this view).
431 static unsigned getRegForName(const StringRef &RegName) {
433 return StringSwitch<unsigned>(RegName)
434 .Case("exec", AMDGPU::EXEC)
435 .Case("vcc", AMDGPU::VCC)
436 .Case("flat_scr", AMDGPU::FLAT_SCR)
437 .Case("m0", AMDGPU::M0)
438 .Case("scc", AMDGPU::SCC)
439 .Case("flat_scr_lo", AMDGPU::FLAT_SCR_LO)
440 .Case("flat_scr_hi", AMDGPU::FLAT_SCR_HI)
441 .Case("vcc_lo", AMDGPU::VCC_LO)
442 .Case("vcc_hi", AMDGPU::VCC_HI)
443 .Case("exec_lo", AMDGPU::EXEC_LO)
444 .Case("exec_hi", AMDGPU::EXEC_HI)
// Parse a register reference: first try the named special registers, then
// "s"/"v" GPRs in either single form (s7, v0) or range form (s[0:1]).
448 bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
449 const AsmToken Tok = Parser.getTok();
450 StartLoc = Tok.getLoc();
451 EndLoc = Tok.getEndLoc();
452 const StringRef &RegName = Tok.getString();
453 RegNo = getRegForName(RegName);
460 // Match vgprs and sgprs
461 if (RegName[0] != 's' && RegName[0] != 'v')
464 bool IsVgpr = RegName[0] == 'v';
466 unsigned RegIndexInClass;
467 if (RegName.size() > 1) {
468 // We have a 32-bit register
// Digits follow the s/v prefix directly, e.g. "v12".
470 if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
474 // We have a register greater than 32-bits.
// Range syntax: <s|v>[Lo:Hi], parsed as two absolute expressions.
476 int64_t RegLo, RegHi;
478 if (getLexer().isNot(AsmToken::LBrac))
482 if (getParser().parseAbsoluteExpression(RegLo))
485 if (getLexer().isNot(AsmToken::Colon))
489 if (getParser().parseAbsoluteExpression(RegHi))
492 if (getLexer().isNot(AsmToken::RBrac))
// Width in dwords is inclusive of both endpoints.
496 RegWidth = (RegHi - RegLo) + 1;
498 // VGPR registers aren't aligned.
499 RegIndexInClass = RegLo;
501 // SGPR registers are aligned. Max alignment is 4 dwords.
502 RegIndexInClass = RegLo / std::min(RegWidth, 4u);
// Translate (class, index) to the physical register number, bounds-checked
// against the class size.
506 const MCRegisterInfo *TRC = getContext().getRegisterInfo();
507 unsigned RC = getRegClass(IsVgpr, RegWidth);
508 if (RegIndexInClass > TRC->getRegClass(RC).getNumRegs())
510 RegNo = TRC->getRegClass(RC).getRegister(RegIndexInClass);
// Reject a matched instruction whose encoding disagrees with an explicit
// _e32/_e64 suffix the user wrote on the mnemonic.
514 unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
516 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
518 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
519 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
520 return Match_InvalidOperand;
522 return Match_Success;
// Run the generated matcher and emit on success; on failure produce a
// diagnostic. For forced-VOP3 mnemonics missing their trailing clamp/omod
// operands, append defaults and retry once recursively.
526 bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
527 OperandVector &Operands,
530 bool MatchingInlineAsm) {
533 switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
537 Out.EmitInstruction(Inst, STI);
539 case Match_MissingFeature:
540 return Error(IDLoc, "instruction not supported on this GPU");
542 case Match_MnemonicFail:
543 return Error(IDLoc, "unrecognized instruction mnemonic");
545 case Match_InvalidOperand: {
546 SMLoc ErrorLoc = IDLoc;
547 if (ErrorInfo != ~0ULL) {
// ErrorInfo past the operand list means the matcher wanted MORE operands.
548 if (ErrorInfo >= Operands.size()) {
549 if (isForcedVOP3()) {
550 // If 64-bit encoding has been forced we can end up with no
551 // clamp or omod operands if none of the registers have modifiers,
552 // so we need to add these to the operand list.
553 AMDGPUOperand &LastOp =
554 ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
555 if (LastOp.isRegKind() ||
557 LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
558 SMLoc S = Parser.getTok().getLoc();
559 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
560 AMDGPUOperand::ImmTyClamp));
561 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
562 AMDGPUOperand::ImmTyOMod));
// Retry the match with the defaults appended (single level of recursion).
563 bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
571 return Error(IDLoc, "too few operands for instruction");
// Otherwise point the diagnostic at the offending operand when possible.
574 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
575 if (ErrorLoc == SMLoc())
578 return Error(ErrorLoc, "invalid operand for instruction");
581 llvm_unreachable("Implement any new match types added!");
// Target-specific directive hook (MCTargetAsmParser override); body not
// visible in this view of the file.
584 bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
// True if any parsed operand implies the VOP3 encoding: a register carrying
// input modifiers, or an already-added clamp/omod immediate.
588 static bool operandsHaveModifiers(const OperandVector &Operands) {
590 for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
591 const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
592 if (Op.isRegKind() && Op.hasModifiers())
594 if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
595 Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
// Parse a single operand: custom (tablegen-registered) parsers first, then
// the generic integer / float / register paths, handling the unary '-' and
// '|...|' (abs) source-modifier syntax around registers.
601 AMDGPUAsmParser::OperandMatchResultTy
602 AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
604 // Try to parse with a custom parser
605 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
607 // If we successfully parsed the operand or if there as an error parsing,
610 // If we are parsing after we reach EndOfStatement then this means we
611 // are appending default values to the Operands list. This is only done
612 // by custom parser, so we shouldn't continue on to the generic parsing.
613 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
614 getLexer().is(AsmToken::EndOfStatement))
617 bool Negate = false, Abs = false;
618 if (getLexer().getKind()== AsmToken::Minus) {
623 if (getLexer().getKind() == AsmToken::Pipe) {
628 switch(getLexer().getKind()) {
629 case AsmToken::Integer: {
630 SMLoc S = Parser.getTok().getLoc();
632 if (getParser().parseAbsoluteExpression(IntVal))
633 return MatchOperand_ParseFail;
// Immediates must fit in 32 bits; verify via sign-extension round-trip.
634 APInt IntVal32(32, IntVal);
635 if (IntVal32.getSExtValue() != IntVal) {
636 Error(S, "invalid immediate: only 32-bit values are legal");
637 return MatchOperand_ParseFail;
640 IntVal = IntVal32.getSExtValue();
643 Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
644 return MatchOperand_Success;
646 case AsmToken::Real: {
647 // FIXME: We should emit an error if a double precisions floating-point
648 // value is used. I'm not sure the best way to detect this.
649 SMLoc S = Parser.getTok().getLoc();
651 if (getParser().parseAbsoluteExpression(IntVal))
652 return MatchOperand_ParseFail;
// Store the float's bit pattern as the immediate payload.
654 APFloat F((float)BitsToDouble(IntVal));
658 AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
659 return MatchOperand_Success;
661 case AsmToken::Identifier: {
664 if (!ParseRegister(RegNo, S, E)) {
666 bool HasModifiers = operandsHaveModifiers(Operands);
667 unsigned Modifiers = 0;
// A '|' opened before the register must be closed after it.
673 if (getLexer().getKind() != AsmToken::Pipe)
674 return MatchOperand_ParseFail;
679 if (Modifiers && !HasModifiers) {
680 // We are adding a modifier to src1 or src2 and previous sources
681 // don't have modifiers, so we need to go back and empty modifers
682 // for each previous source.
683 for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
686 AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
687 RegOp.setModifiers(0);
692 Operands.push_back(AMDGPUOperand::CreateReg(
693 RegNo, S, E, getContext().getRegisterInfo(),
696 if (HasModifiers || Modifiers) {
697 AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
698 RegOp.setModifiers(Modifiers);
// Not a register: keep the raw identifier as a token operand.
702 Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
706 return MatchOperand_Success;
709 return MatchOperand_NoMatch;
// Parse one instruction: record any _e32/_e64 forced encoding from the
// mnemonic, push the mnemonic token, then parse operands to end of statement
// and let custom parsers append defaults for omitted optional operands.
713 bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
715 SMLoc NameLoc, OperandVector &Operands) {
717 // Clear any forced encodings from the previous instruction.
718 setForcedEncodingSize(0);
720 if (Name.endswith("_e64"))
721 setForcedEncodingSize(64);
722 else if (Name.endswith("_e32"))
723 setForcedEncodingSize(32);
725 // Add the instruction mnemonic
726 Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
728 while (!getLexer().is(AsmToken::EndOfStatement)) {
729 AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
731 // Eat the comma or space if there is one.
732 if (getLexer().is(AsmToken::Comma))
736 case MatchOperand_Success: break;
737 case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
738 "failed parsing operand.");
739 case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
740 "not a valid operand.");
744 // Once we reach end of statement, continue parsing so we can add default
745 // values for optional arguments.
746 AMDGPUAsmParser::OperandMatchResultTy Res;
747 while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
748 if (Res != MatchOperand_Success)
749 return Error(getLexer().getLoc(), "failed parsing operand.");
754 //===----------------------------------------------------------------------===//
756 //===----------------------------------------------------------------------===//
// Parse "<Prefix>:<integer>" into Int. At end of statement the (elided)
// default value is used instead, so omitted optional operands still succeed.
758 AMDGPUAsmParser::OperandMatchResultTy
759 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
762 // We are at the end of the statement, and this is a default argument, so
763 // use a default value.
764 if (getLexer().is(AsmToken::EndOfStatement)) {
766 return MatchOperand_Success;
769 switch(getLexer().getKind()) {
770 default: return MatchOperand_NoMatch;
771 case AsmToken::Identifier: {
772 StringRef OffsetName = Parser.getTok().getString();
773 if (!OffsetName.equals(Prefix))
774 return MatchOperand_NoMatch;
// After the prefix we require exactly ':' then an integer literal.
777 if (getLexer().isNot(AsmToken::Colon))
778 return MatchOperand_ParseFail;
781 if (getLexer().isNot(AsmToken::Integer))
782 return MatchOperand_ParseFail;
784 if (getParser().parseAbsoluteExpression(Int))
785 return MatchOperand_ParseFail;
789 return MatchOperand_Success;
// Convenience overload: parse "<Prefix>:<integer>" and push it onto
// Operands as an immediate of type ImmTy.
792 AMDGPUAsmParser::OperandMatchResultTy
793 AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
794 enum AMDGPUOperand::ImmTy ImmTy) {
796 SMLoc S = Parser.getTok().getLoc();
799 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
800 if (Res != MatchOperand_Success)
803 Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
804 return MatchOperand_Success;
// Parse a boolean flag operand: the bare name sets the bit, "no<Name>"
// clears it, and absence (end of statement) yields the default.
807 AMDGPUAsmParser::OperandMatchResultTy
808 AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
809 enum AMDGPUOperand::ImmTy ImmTy) {
811 SMLoc S = Parser.getTok().getLoc();
813 // We are at the end of the statement, and this is a default argument, so
814 // use a default value.
815 if (getLexer().isNot(AsmToken::EndOfStatement)) {
816 switch(getLexer().getKind()) {
817 case AsmToken::Identifier: {
818 StringRef Tok = Parser.getTok().getString();
// "no" prefix negates the flag, e.g. "noglc".
822 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
826 return MatchOperand_NoMatch;
831 return MatchOperand_NoMatch;
835 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
836 return MatchOperand_Success;
// True if this optional operand was already parsed, either as an immediate
// of the matching type or as a literal token with the matching name.
839 static bool operandsHasOptionalOp(const OperandVector &Operands,
840 const OptionalOperand &OOp) {
841 for (unsigned i = 0; i < Operands.size(); i++) {
842 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
843 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
844 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
// Try each table entry not yet present in Operands, parsing it as a named
// bit or a prefixed integer, then validate/convert and push the immediate.
// Returns on the first operand that parses.
851 AMDGPUAsmParser::OperandMatchResultTy
852 AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
853 OperandVector &Operands) {
854 SMLoc S = Parser.getTok().getLoc();
855 for (const OptionalOperand &Op : OptionalOps) {
// Skip operands that were already supplied explicitly.
856 if (operandsHasOptionalOp(Operands, Op))
858 AMDGPUAsmParser::OperandMatchResultTy Res;
861 Res = parseNamedBit(Op.Name, Operands, Op.Type);
862 if (Res == MatchOperand_NoMatch)
867 Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
869 if (Res == MatchOperand_NoMatch)
872 if (Res != MatchOperand_Success)
// ConvertResult may reject or rewrite the parsed value (e.g. omod mul/div).
875 if (Op.ConvertResult && !Op.ConvertResult(Value)) {
876 return MatchOperand_ParseFail;
879 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
880 return MatchOperand_Success;
882 return MatchOperand_NoMatch;
885 //===----------------------------------------------------------------------===//
887 //===----------------------------------------------------------------------===//
// Optional-operand tables for DS instructions: single-offset form, and the
// two-offset (offset0/offset1) form used by e.g. ds_read2 variants.
889 static const OptionalOperand DSOptionalOps [] = {
890 {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
891 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
894 static const OptionalOperand DSOptionalOpsOff01 [] = {
895 {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
896 {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
897 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
// Custom-parser hook: optional operands for single-offset DS instructions.
900 AMDGPUAsmParser::OperandMatchResultTy
901 AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
902 return parseOptionalOps(DSOptionalOps, Operands);
// Custom-parser hook: optional operands for offset0/offset1 DS instructions.
904 AMDGPUAsmParser::OperandMatchResultTy
905 AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
906 return parseOptionalOps(DSOptionalOpsOff01, Operands);
// Parse an "offset:N" operand, substituting an explicit 0 immediate when
// the offset is omitted so the convert functions always find one.
909 AMDGPUAsmParser::OperandMatchResultTy
910 AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
911 SMLoc S = Parser.getTok().getLoc();
912 AMDGPUAsmParser::OperandMatchResultTy Res =
913 parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
914 if (Res == MatchOperand_NoMatch) {
915 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
916 AMDGPUOperand::ImmTyOffset));
917 Res = MatchOperand_Success;
// DS single-offset field is 16 bits wide.
922 bool AMDGPUOperand::isDSOffset() const {
923 return isImm() && isUInt<16>(getImm());
// Each of the DS offset0/offset1 fields is 8 bits wide.
926 bool AMDGPUOperand::isDSOffset01() const {
927 return isImm() && isUInt<8>(getImm());
// Build the MCInst for a two-offset DS instruction: registers in parse
// order, then offset0/offset1/gds located by immediate type, then implicit m0.
930 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
931 const OperandVector &Operands) {
// Map each optional immediate's type to its index in Operands.
933 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
935 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
936 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
938 // Add the register arguments
940 Op.addRegOperands(Inst, 1);
944 // Handle optional arguments
945 OptionalIdx[Op.getImmTy()] = i;
948 unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
949 unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
950 unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
952 ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
953 ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
954 ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
955 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
// Build the MCInst for a single-offset DS instruction. A literal "gds"
// token (hard-coded in some asm strings) is noted but emits no operand here.
958 void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
960 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
961 bool GDSOnly = false;
963 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
964 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
966 // Add the register arguments
968 Op.addRegOperands(Inst, 1);
972 if (Op.isToken() && Op.getToken() == "gds") {
977 // Handle optional arguments
978 OptionalIdx[Op.getImmTy()] = i;
981 unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
982 ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
985 unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
986 ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
988 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
992 //===----------------------------------------------------------------------===//
994 //===----------------------------------------------------------------------===//
// Parse one "name(value)" counter clause of an s_waitcnt operand
// (vmcnt/expcnt/lgkmcnt) and merge it into IntVal at that counter's
// bit position. The per-counter mask/shift assignments are elided from
// this view. Returns true on a malformed clause.
996 bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
997 StringRef CntName = Parser.getTok().getString();
1001 if (getLexer().isNot(AsmToken::LParen))
1005 if (getLexer().isNot(AsmToken::Integer))
1008 if (getParser().parseAbsoluteExpression(CntVal))
1011 if (getLexer().isNot(AsmToken::RParen))
// Clauses may be chained with '&' or ','.
1015 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1021 if (CntName == "vmcnt") {
1024 } else if (CntName == "expcnt") {
1027 } else if (CntName == "lgkmcnt") {
// Clear this counter's field, then insert the new value.
1034 IntVal &= ~(CntMask << CntShift);
1035 IntVal |= (CntVal << CntShift);
// Parse the s_waitcnt operand: either a raw integer or a sequence of
// named counter clauses. Unmentioned counters keep their all-ones
// ("disabled") fields from the 0x77f starting value.
1039 AMDGPUAsmParser::OperandMatchResultTy
1040 AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1041 // Disable all counters by default.
1045 int64_t CntVal = 0x77f;
1046 SMLoc S = Parser.getTok().getLoc();
1048 switch(getLexer().getKind()) {
1049 default: return MatchOperand_ParseFail;
1050 case AsmToken::Integer:
1051 // The operand can be an integer value.
1052 if (getParser().parseAbsoluteExpression(CntVal))
1053 return MatchOperand_ParseFail;
1056 case AsmToken::Identifier:
// Consume clauses until end of statement; each merges one counter field.
1058 if (parseCnt(CntVal))
1059 return MatchOperand_ParseFail;
1060 } while(getLexer().isNot(AsmToken::EndOfStatement));
1063 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1064 return MatchOperand_Success;
// Predicate for the s_waitcnt operand class; body not visible in this view.
1067 bool AMDGPUOperand::isSWaitCnt() const {
1071 //===----------------------------------------------------------------------===//
1072 // sopp branch targets
1073 //===----------------------------------------------------------------------===//
// Parse a SOPP branch target: an absolute integer offset, or a label name
// turned into a symbol-reference expression resolved at layout time.
1075 AMDGPUAsmParser::OperandMatchResultTy
1076 AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
1077 SMLoc S = Parser.getTok().getLoc();
1079 switch (getLexer().getKind()) {
1080 default: return MatchOperand_ParseFail;
1081 case AsmToken::Integer: {
1083 if (getParser().parseAbsoluteExpression(Imm))
1084 return MatchOperand_ParseFail;
1085 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
1086 return MatchOperand_Success;
1089 case AsmToken::Identifier:
1090 Operands.push_back(AMDGPUOperand::CreateExpr(
1091 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
1092 Parser.getTok().getString()), getContext()), S));
1094 return MatchOperand_Success;
1098 //===----------------------------------------------------------------------===//
1100 //===----------------------------------------------------------------------===//
// Optional-operand tables for FLAT instructions. Atomics omit "glc"
// because their return behavior is baked into the mnemonic instead.
1102 static const OptionalOperand FlatOptionalOps [] = {
1103 {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
1104 {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
1105 {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
1108 static const OptionalOperand FlatAtomicOptionalOps [] = {
1109 {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
1110 {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
// Custom-parser hook: optional operands for non-atomic FLAT instructions.
1113 AMDGPUAsmParser::OperandMatchResultTy
1114 AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
1115 return parseOptionalOps(FlatOptionalOps, Operands);
// Custom-parser hook: optional operands for FLAT atomic instructions.
1118 AMDGPUAsmParser::OperandMatchResultTy
1119 AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
1120 return parseOptionalOps(FlatAtomicOptionalOps, Operands);
// Build the MCInst for a FLAT instruction: registers in parse order, then
// glc (when the opcode takes one) / slc / tfe located by immediate type.
1123 void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
1124 const OperandVector &Operands) {
1125 std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1127 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1128 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1130 // Add the register arguments
1132 Op.addRegOperands(Inst, 1);
1136 // Handle 'glc' token which is sometimes hard-coded into the
1137 // asm string. There are no MCInst operands for these.
1141 // Handle optional arguments
1142 OptionalIdx[Op.getImmTy()] = i;
1146 // flat atomic instructions don't have a glc argument.
1147 if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
1148 unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
1149 ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
1152 unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
1153 unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
1155 ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
1156 ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
1159 //===----------------------------------------------------------------------===//
1161 //===----------------------------------------------------------------------===//
// Optional-operand table for MUBUF instructions, in emission order.
1163 static const OptionalOperand MubufOptionalOps [] = {
1164 {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
1165 {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
1166 {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
1167 {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
// Custom-parser hook: optional operands for MUBUF instructions.
1170 AMDGPUAsmParser::OperandMatchResultTy
1171 AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
1172 return parseOptionalOps(MubufOptionalOps, Operands);
// Parse a standalone "offset:N" operand (default ImmTyNone immediate type).
1175 AMDGPUAsmParser::OperandMatchResultTy
1176 AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
1177 return parseIntWithPrefix("offset", Operands);
// Parse the "glc"/"noglc" cache-policy bit.
1180 AMDGPUAsmParser::OperandMatchResultTy
1181 AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
1182 return parseNamedBit("glc", Operands);
// Parse the "slc"/"noslc" cache-policy bit.
1185 AMDGPUAsmParser::OperandMatchResultTy
1186 AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
1187 return parseNamedBit("slc", Operands);
// Parse the "tfe"/"notfe" texture-fail-enable bit.
1190 AMDGPUAsmParser::OperandMatchResultTy
1191 AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
1192 return parseNamedBit("tfe", Operands);
// MUBUF offset field is 12 bits wide.
1195 bool AMDGPUOperand::isMubufOffset() const {
1196 return isImm() && isUInt<12>(getImm());
// Build the MCInst for a MUBUF instruction: registers and an immediate
// soffset in parse order, then offset/glc/slc/tfe located by immediate type.
1199 void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
1200 const OperandVector &Operands) {
1201 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1203 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1204 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1206 // Add the register arguments
1208 Op.addRegOperands(Inst, 1);
1212 // Handle the case where soffset is an immediate
1213 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
1214 Op.addImmOperands(Inst, 1);
1218 // Handle tokens like 'offen' which are sometimes hard-coded into the
1219 // asm string. There are no MCInst operands for these.
1225 // Handle optional arguments
1226 OptionalIdx[Op.getImmTy()] = i;
// All four optionals must have been parsed (or defaulted) by now.
1229 assert(OptionalIdx.size() == 4);
1231 unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
1232 unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
1233 unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
1234 unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
1236 ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
1237 ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
1238 ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
1239 ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
1242 //===----------------------------------------------------------------------===//
1244 //===----------------------------------------------------------------------===//
// Parse the MIMG "dmask:N" channel-mask operand.
1246 AMDGPUAsmParser::OperandMatchResultTy
1247 AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
1248 return parseIntWithPrefix("dmask", Operands);
// Parse the MIMG "unorm"/"nounorm" bit.
1251 AMDGPUAsmParser::OperandMatchResultTy
1252 AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
1253 return parseNamedBit("unorm", Operands);
// Parse the MIMG "r128"/"nor128" bit.
1256 AMDGPUAsmParser::OperandMatchResultTy
1257 AMDGPUAsmParser::parseR128(OperandVector &Operands) {
1258 return parseNamedBit("r128", Operands);
1261 //===----------------------------------------------------------------------===//
1263 //===----------------------------------------------------------------------===//
// Validate/convert a "mul:N" output-modifier value; only 1, 2, and 4 are
// legal multipliers. (Encoding conversion lines are elided from this view.)
1265 static bool ConvertOmodMul(int64_t &Mul) {
1266 if (Mul != 1 && Mul != 2 && Mul != 4)
// Validate/convert a "div:N" output-modifier value; body not visible in
// this view.
1273 static bool ConvertOmodDiv(int64_t &Div) {
// Optional-operand table for VOP3: clamp flag plus the mul/div output
// modifiers, each converted into the omod field by its hook.
1287 static const OptionalOperand VOP3OptionalOps [] = {
1288 {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
1289 {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
1290 {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
// Heuristic: do the operands parsed so far force the VOP3 encoding?
// Triggers on modifiers, an SGPR-pair destination (carry-out), operand
// count, or an SGPR source in the src1 position.
1293 static bool isVOP3(OperandVector &Operands) {
1294 if (operandsHaveModifiers(Operands))
1297 AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
1299 if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
1302 if (Operands.size() >= 5)
1305 if (Operands.size() > 3) {
1306 AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
1307 if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
1308 Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
// Parse trailing VOP3 optional operands (clamp/mul/div) when the encoding
// is — or is being forced — VOP3; after adding the first modifier operand,
// backfill empty modifiers onto earlier register operands.
1314 AMDGPUAsmParser::OperandMatchResultTy
1315 AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
1317 // The value returned by this function may change after parsing
1318 // an operand so store the original value here.
1319 bool HasModifiers = operandsHaveModifiers(Operands);
1321 bool IsVOP3 = isVOP3(Operands);
1322 if (HasModifiers || IsVOP3 ||
1323 getLexer().isNot(AsmToken::EndOfStatement) ||
1324 getForcedEncodingSize() == 64) {
1326 AMDGPUAsmParser::OperandMatchResultTy Res =
1327 parseOptionalOps(VOP3OptionalOps, Operands);
1329 if (!HasModifiers && Res == MatchOperand_Success) {
1330 // We have added a modifier operation, so we need to make sure all
1331 // previous register operands have modifiers
1332 for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
1333 AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
1340 return MatchOperand_NoMatch;
// Build the MCInst for a VOP3 instruction. With modifiers present, each
// source register emits a (modifiers, register) pair and the trailing
// clamp/omod immediates are located by type; otherwise sources are emitted
// plainly. (The initialization of the loop index `i` is elided here —
// presumably it starts just past the destination; confirm upstream.)
1343 void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
1344 ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
1347 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1349 if (operandsHaveModifiers(Operands)) {
1350 for (unsigned e = Operands.size(); i != e; ++i) {
1351 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1353 if (Op.isRegWithInputMods()) {
// Emits two MCInst operands: the modifier immediate, then the register.
1354 ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
1357 OptionalIdx[Op.getImmTy()] = i;
1360 unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
1361 unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
1363 ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
1364 ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
1366 for (unsigned e = Operands.size(); i != e; ++i)
1367 ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
1371 /// Force static initialization.
// Registers this parser for both the R600 and GCN targets.
1372 extern "C" void LLVMInitializeAMDGPUAsmParser() {
1373 RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
1374 RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
1377 #define GET_REGISTER_MATCHER
1378 #define GET_MATCHER_IMPLEMENTATION
1379 #include "AMDGPUGenAsmMatcher.inc"