//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm),
    ST(tm.getSubtarget<AMDGPUSubtarget>())
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg) &&
      AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    VectorComponents = 4;
  } else if (AMDGPU::R600_Reg64RegClass.contains(DestReg) &&
             AMDGPU::R600_Reg64RegClass.contains(SrcReg)) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                             unsigned DstReg, int64_t Imm) const {
  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
    default: return false;
    case AMDGPU::CUBE_r600_pseudo:
    case AMDGPU::CUBE_r600_real:
    case AMDGPU::CUBE_eg_pseudo:
    case AMDGPU::CUBE_eg_real:
      return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return MFI->ShaderType != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

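// Note: as modeled here, subtargets without a vertex cache (and compute
// shaders generally) service vertex fetches through the texture cache, so
// VTX instructions count as texture cache users in those cases.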
bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return (MFI->ShaderType == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert(SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

#define SRC_SEL_ROWS 11
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) {
    if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, SrcSelTable[i][1]);
    }
  }
  return -1;
}
#undef SRC_SEL_ROWS

SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell it's a PS/PV reg
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

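// Rearranges the three (gpr index, channel) pairs of Src into the read order
// requested by Swz. The digit group in each BankSwizzle name lists that order
// for the vector slots, e.g. ALU_VEC_102_SCL_221 reads src1 first, then src0,
// then src2; the SCL_ suffix describes the trans slot (see getTransSwizzle).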
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

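// For the trans (scalar) slot, returns the cycle on which operand Op is
// fetched under swizzle Swz; each table below is the SCL_ digit group of the
// corresponding enum name. ALU_VEC_201 and ALU_VEC_210 have no trans variant,
// hence the llvm_unreachable default.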
static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}

/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
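  // Vector[Chan][Cycle] records which gpr index is fetched on read port
  // Cycle of bank channel Chan; a swizzle sequence stays legal as long as no
  // port is asked to deliver two different indices.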
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return false;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic order) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
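/// (This is an odometer-style increment: every swizzle past the rightmost
/// non-maximal position at or before Idx is reset to the smallest value,
/// ALU_VEC_012_SCL_210, that position is bumped by one, and false is
/// returned once the whole sequence has wrapped around.)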
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible Swizzle sequence to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in Trans slot can't read gpr at cycle 0 if they also read
/// a const, and can't read a gpr at cycle 1 if they read 2 const.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
                            IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = IGSrcs.back();
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

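  // The last instruction of the group is issued to the trans (scalar) unit:
  // try each swizzle that has a trans variant and keep the first one under
  // which the remaining vector slots can still be assigned legally.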
  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
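  // Each Consts entry encodes (kcache index << 2) | chan. Clearing the lowest
  // channel bit groups the constants into pairs of adjacent channels; the
  // loop below accepts the instruction group only if it reads from at most
  // two distinct pairs.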
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
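  // Gather every constant-bank address and every distinct literal used by
  // the ALU instructions of the group: a group may encode at most four
  // distinct 32-bit literals, and the collected constant addresses are
  // checked against the pair limit by the overload above.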
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      std::pair<MachineOperand *, unsigned> Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  };
}

static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return llvm::prior(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

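// A conditional jump consumes PREDICATE_BIT, produced by the PRED_X that
// InsertBranch tags with MO_FLAG_PUSH; the enclosing clause is retyped to
// CF_ALU_PUSH_BEFORE so the active mask is pushed before the branch.
// RemoveBranch reverses both side effects.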
unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note : we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB contains
    // more than one clause, and we cannot predicate several clauses.
    if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm
    if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
      return false;
    return true;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCyles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCyles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                      const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (MI->getOpcode() == AMDGPU::CF_ALU) {
    MI->getOperand(8).setImm(0);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg =
          AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrStoreRegClass(
                                                  unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

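// Indirect writes are emitted as a two-instruction sequence: a MOVA_INT_eg
// that loads the offset into the address register AR_X, followed by a MOV
// whose destination is relative-addressed (dst_rel = 1). buildIndirectRead
// mirrors this with src0_rel on the source side.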
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg, AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)      // $update_exec_mask
       .addImm(0);     // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  //XXX: The r600g finalizer expects this to be 1, once we've moved the
  //scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)        // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)        // $literal
     .addImm(0);       // $bank_swizzle

  return MIB;
}

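// Typical usage is to build the instruction with these neutral defaults and
// then override individual fields through setImmOperand, e.g.:
//   MachineInstr *MI = buildDefaultInstruction(MBB, I, AMDGPU::MOV, DstReg,
//                                              AMDGPU::ALU_LITERAL_X);
//   setImmOperand(MI, AMDGPU::OpName::literal, Imm);
// which is exactly the pattern buildMovImm below uses.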
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert(MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert(MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

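// For instructions with native modifier operands, a flag is simply that
// operand's immediate. Otherwise all per-operand flags live packed in a
// single flag operand, NUM_MO_FLAGS bits per operand; addFlag and clearFlag
// below update that field with the matching shift.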
void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}