//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm),
    ST(tm.getSubtarget<AMDGPUSubtarget>())
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}
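
// Note on vector copies: a 64/128-bit register copy cannot be expressed as a
// single hardware MOV, so copyPhysReg below expands it channel by channel.
// Illustrative sketch (register names hypothetical):
//   COPY T1_XYZW, T2_XYZW
// roughly becomes
//   MOV T1_X, T2_X; MOV T1_Y, T2_Y; MOV T1_Z, T2_Z; MOV T1_W, T2_W
// with the destination super-register added as an implicit def on each MOV.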

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg) &&
      AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    VectorComponents = 4;
  } else if(AMDGPU::R600_Reg64RegClass.contains(DestReg) &&
            AMDGPU::R600_Reg64RegClass.contains(SrcReg)) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
  if (isALUInstr(MI->getOpcode()))
    return true;
  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
    return true;
  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return MFI->ShaderType != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return (MFI->ShaderType == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert (SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

#define SRC_SEL_ROWS 11
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) {
    if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, SrcSelTable[i][1]);
    }
  }
  return -1;
}
#undef SRC_SEL_ROWS
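
// getSrcs pairs each source operand of MI with the extra datum needed to
// interpret it: the selector immediate for ALU_CONST sources, the literal
// payload for ALU_LITERAL_X sources, and 0 for ordinary register sources.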

SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}
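
// ExtractSrcs lowers each source to an (index, channel) pair for the read
// port checks below. By convention here, (-1, 0) is an unused or constant
// slot and index 255 marks a PS/PV (previous instruction group) operand,
// which does not consume a GPR read port.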

std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to mark that this is a PS/PV reg.
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}
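
// A bank swizzle determines in which cycle each of an instruction's (up to
// three) GPR sources is read. Reading the enumerator names: the VEC digits
// give the source permutation used when the instruction occupies a vector
// slot (implemented by Swizzle below), while the SCL digits give the read
// cycles used in the trans slot (see getTransSwizzle). E.g.
// ALU_VEC_102_SCL_221 swaps src0 and src1 for a vector slot.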

static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}
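
// Read port model used below: each register channel (X, Y, Z, W) provides one
// read port per cycle, tracked in the Vector[channel][cycle] grid. Two
// different register indices may not occupy the same (channel, cycle) cell;
// reads of the same index share the port, and PS/PV (255) and constant (-1)
// entries are exempt.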

/// Returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
            // The value from output queue A (denoted by register OQAP) can
            // only be fetched during the first cycle.
            return false;
        }
        // OQAP does not count towards the normal read port restrictions.
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check the Trans ALU.
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic order) swizzle sequence, assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}
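
// NextPossibleSolution advances the candidate vector like an odometer, e.g.
// (..., ALU_VEC_120_SCL_212, ALU_VEC_210) steps to
// (..., ALU_VEC_102_SCL_221, ALU_VEC_012_SCL_210): trailing slots reset to
// the first swizzle and the last non-maximal slot at or before Idx is
// incremented.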

/// Enumerate all possible swizzle sequences to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in the Trans slot can't read a GPR at cycle 0 if they also
/// read a constant, and can't read a GPR at cycle 1 if they read two
/// constants.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // The TransALU can't read 3 constants.
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}
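
// fitsReadPortLimitations searches in two phases: if the last instruction of
// the group does not occupy the trans slot, a swizzle assignment is sought
// for the vector slots alone; otherwise the trans sources are peeled off and
// each of the four trans-legal swizzles is tried until the vector slots can
// also be satisfied.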

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand.

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
        AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
        IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = IGSrcs.back();
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
        TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}
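
// Each entry of Consts encodes (const index << 2) | channel, so masking away
// bit 0 (as above) folds two neighbouring channels onto one key. A single
// instruction group may address at most two distinct keys (Pair1/Pair2):
// e.g. encodings 0 and 1 share a key, while a third distinct key makes the
// group illegal.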

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      std::pair<MachineOperand *, unsigned> Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
      Opcode == AMDGPU::BRANCH_COND_f32;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  };
}

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
      It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return llvm::prior(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}
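
// RemoveBranch is the inverse of InsertBranch above: erasing a JUMP_COND also
// clears the push flag on its predicate setter and downgrades the enclosing
// CF_ALU_PUSH_BEFORE back to a plain CF_ALU.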

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB contains
    // more than one clause; we are unable to predicate multiple clauses.
    if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging at the moment.
    if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
      return false;
    return true;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCyles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return false;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCyles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}
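
// Reversing a condition flips both of its halves: the compare opcode stored
// in Cond[1] (zero <-> not-zero, in both int and float flavours) and the
// predicate select register stored in Cond[2]
// (PRED_SEL_ZERO <-> PRED_SEL_ONE).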

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (MI->getOpcode() == AMDGPU::CF_ALU) {
    MI->getOperand(8).setImm(0);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg =
          AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2.
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}
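
// Indirect accesses are emitted as a two-instruction sequence: a MOVA_INT_eg
// loads the offset into the address register AR_X, then a MOV with its
// destination (for writes) or src0 (for reads) marked relative so the
// hardware applies AR_X. Roughly:
//   MOVA_INT_eg AR_X, OffsetReg
//   MOV         Addr[AR_X], ValueReg   ; dst_rel = 1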

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}
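
// buildDefaultInstruction fills in every operand of a native ALU encoding
// with neutral defaults (no negate, no abs, no relative addressing,
// sel = -1), so callers only supply the opcode and registers. E.g.
// buildMovInstr further down emits a plain MOV as
//   buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);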

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved scheduling
  // into the backend, we can change the default to 0.
  MIB.addImm(1)        // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)        // $literal
     .addImm(0);       // $bank_swizzle

  return MIB;
}

#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE
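
// getSlotedOps maps a generic operand name to its per-slot variant, e.g.
// (AMDGPU::OpName::src0, Slot = 2) yields AMDGPU::OpName::src0_Z. This is
// what lets buildSlotOfVectorInstruction below peel one lane of a DOT_4
// pseudo into a standalone DOT4 instruction.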

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}
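
// For instructions without native flag operands, all per-operand flags live
// in one packed immediate, NUM_MO_FLAGS bits per operand; addFlag and
// clearFlag below shift a flag into the right field, e.g. clearing a flag on
// operand 1 masks out (Flag << NUM_MO_FLAGS).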

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}