//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
  : AMDGPUInstrInfo(st),
    RI() {}

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((AMDGPU::R600_Reg128RegClass.contains(DestReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (AMDGPU::R600_Reg128RegClass.contains(SrcReg) ||
       AMDGPU::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((AMDGPU::R600_Reg64RegClass.contains(DestReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(DestReg)) &&
             (AMDGPU::R600_Reg64RegClass.contains(SrcReg) ||
              AMDGPU::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
          .addReg(DestReg, RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
        .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
  if (isALUInstr(MI->getOpcode()))
    return true;
  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
    return true;
  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
  return MFI->getShaderType() != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
  return (MFI->getShaderType() == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert(SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

#define SRC_SEL_ROWS 11
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) {
    if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, SrcSelTable[i][1]);
    }
  }
  return -1;
}
#undef SRC_SEL_ROWS

SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to mark a PS/PV reg.
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

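// Each source returned by ExtractSrcs is summarized as an (index, channel)
// pair: -1 marks an unused or constant slot, 255 marks a PS/PV forwarded
// value, and anything else is a hardware GPR index plus the channel it is
// read from. Reading T2.Y, for example, is recorded as the pair (2, 1).
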
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1] = Src[2];
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

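// Swizzle permutes the three vector-slot sources according to the bank
// swizzle: ALU_VEC_102_SCL_221, for example, yields the read order
// (src1, src0, src2) and ALU_VEC_210 yields (src2, src1, src0). The
// Src[0] == Src[1] special case collapses a repeated operand so that it is
// only matched against a single read port.
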
static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}

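// For the trans slot the swizzle instead selects the cycle on which each
// operand is read: with ALU_VEC_012_SCL_210, operand 0 is read on cycle 2,
// operand 1 on cycle 1 and operand 2 on cycle 0; the "SCL_210" suffix
// encodes exactly this cycle assignment.
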
/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return false;
        }
        // OQAP does not count towards the normal read port restrictions.
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu.
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

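// Vector[4][3] above models the read ports, one entry per (channel, cycle)
// pair. Each GPR index is recorded in the entry it needs; if a different
// index already occupies that entry, two reads would contend for the same
// port and the group is only legal up to the previous instruction.
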
/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic terms) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx--;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

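// The enumeration behaves like an odometer over the swizzle values: the
// rightmost digit at or before the failing index Idx that is not yet
// ALU_VEC_210 is incremented, and every later digit is reset to
// ALU_VEC_012_SCL_210, skipping all sequences that share the failing prefix.
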
/// Enumerate all possible Swizzle sequences to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in Trans slot can't read gpr at cycle 0 if they also read
/// a const, and can't read a gpr at cycle 1 if they read 2 consts.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants.
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans) const {
  // TODO: support shared src0/src1 operands.

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
                           IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

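// When the last instruction occupies the trans slot, its sources are peeled
// off and each of the four legal trans swizzles is tried in turn: the vector
// slots are re-solved for every candidate, and the first combination that
// satisfies both the const and the read port constraints is kept.
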
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instruction group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

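// Consts[i] & ~3 together with the half bit (Consts[i] & 2) identifies the
// half of the constant slot a read lands in (effectively a 64-bit half, since
// the encoding is (Index << 2) | Chan). The group may use at most two
// distinct halves (Pair1/Pair2); a third distinct half makes it
// unschedulable.
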
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      std::pair<MachineOperand *, unsigned> Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const AMDGPUSubtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return nullptr;
}

static bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
         Opcode == AMDGPU::BRANCH_COND_f32;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Remove successive JUMPs.
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return std::prev(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
          .addMBB(TBB)
          .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
        .addMBB(TBB)
        .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB, then the MBB contains
    // more than one clause; we cannot predicate several clauses.
    if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm.
    if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
      return false;
    return true;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (MI->getOpcode() == AMDGPU::CF_ALU) {
    MI->getOperand(8).setImm(0);
    return true;
  }

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

bool R600InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::R600_EXTRACT_ELT_V2:
  case AMDGPU::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI->getParent(), MI, MI->getOperand(0).getReg(),
                      RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
                      MI->getOperand(2).getReg(),
                      RI.getHWRegChan(MI->getOperand(1).getReg()));
    break;
  case AMDGPU::R600_INSERT_ELT_V2:
  case AMDGPU::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI->getParent(), MI, MI->getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI->getOperand(1).getReg()), // Address
                       MI->getOperand(3).getReg(), // Offset
                       RI.getHWRegChan(MI->getOperand(1).getReg())); // Channel
    break;
  }
  MI->eraseFromParent();
  return true;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
      MF.getSubtarget().getFrameLowering());

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2.
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    AddrReg, ValueReg)
      .addReg(AMDGPU::AR_X, RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = AMDGPU::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = AMDGPU::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = AMDGPU::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    ValueReg, AddrReg)
      .addReg(AMDGPU::AR_X, RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);            // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0)         // $src0_abs
     .addImm(-1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)                    // $literal
     .addImm(0);                   // $bank_swizzle

  return MIB;
}

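// Usage sketch: most helpers in this file create single- or two-operand ALU
// ops through this routine, e.g.
//   buildDefaultInstruction(MBB, I, AMDGPU::MOV, DstReg, SrcReg);
// builds a MOV with neutral modifiers (write enabled, no neg/abs/rel), whose
// operands can then be adjusted via setImmOperand(), addFlag() and friends.
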
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert(MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert(MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

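// On the non-native path all flags live in a single immediate operand,
// packed per machine operand: operand N owns the bit range starting at
// NUM_MO_FLAGS * N, which is why addFlag ORs in
// Flag << (NUM_MO_FLAGS * Operand) and clearFlag below masks the same range.
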
void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}