//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm),
    ST(tm.getSubtarget<AMDGPUSubtarget>())
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    // We can't copy vec4 registers
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                             unsigned DstReg, int64_t Imm) const {
  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
    default: return false;
    case AMDGPU::CUBE_r600_pseudo:
    case AMDGPU::CUBE_r600_real:
    case AMDGPU::CUBE_eg_pseudo:
    case AMDGPU::CUBE_eg_real:
      return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D));
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return MFI->ShaderType != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return (MFI->ShaderType == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

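/// \returns one (operand, selector) pair per source operand of \p MI: the
/// pointer identifies the register operand, and the selector is the
/// constant-buffer index for ALU_CONST sources, the immediate value for
/// ALU_LITERAL_X sources, and 0 otherwise.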
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

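/// Lower getSrcs() output to the (index, channel) pairs consumed by the
/// swizzle checker: -1 marks an unused or constant slot (constants are
/// counted in ConstCount), 255 marks a value forwarded through PV/PS, and
/// the result is always padded to three entries.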
std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell it's a PS/PV reg
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

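// Apply the operand permutation described by bank swizzle Swz to the three
// (index, channel) source slots of Src and return the reordered vector.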
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

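// For a trans-slot-compatible swizzle Swz, return the read cycle (0-2) in
// which source operand Op is fetched.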
static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}

/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  // Vector[Chan][Cycle] records which register index is fetched on each read
  // port; -1 means the port is still free.
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return false;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic term) swizzle sequence assuming that all swizzles after
/// Idx can be skipped
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx --;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible Swizzle sequence to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in Trans slot can't read gpr at cycle 0 if they also read
/// a const, and can't read a gpr at cycle 1 if they read 2 const.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

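/// Try to assign a bank swizzle to each instruction of the group IG
/// (recorded in ValidSwizzle) so that all GPR reads fit the read port
/// limitations; when the last ALU occupies the trans slot, its swizzle is
/// additionally chosen from the trans-compatible subset. \returns true if a
/// valid assignment was found.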
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
                            IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = IGSrcs.back();
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

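/// Each entry of Consts encodes a constant read as (index << 2) | chan. The
/// check collapses every read to its enclosing 64-bit constant half and
/// rejects the group once a third distinct half would be required.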
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      std::pair<MachineOperand *, int64_t> Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

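// Walk backwards from I and return the closest preceding predicate setter
// in MBB, or NULL if none exists.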
static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  };
}

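// Scan the block backwards and return an iterator to the most recent
// CF_ALU/CF_ALU_PUSH_BEFORE clause marker, or MBB.end() if there is none.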
static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return llvm::prior(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note : we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of MBB then the MBB has more
    // than one clause, and we are unable to predicate several clauses.
    if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm
    if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
      return false;
    return true;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCyles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const{
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCyles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                       const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                      const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (MI->getOpcode() == AMDGPU::CF_ALU) {
    MI->getOperand(8).setImm(0);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
                 static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass * R600InstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

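// Indirect writes and reads are lowered as a MOVA_INT that loads the offset
// into the AR_X address register, followed by a MOV whose destination (for
// writes) or source (for reads) is marked relative so that it is indexed
// by AR_X.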
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                           RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)      // $update_exec_mask
       .addImm(0);     // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  //XXX: The r600g finalizer expects this to be 1, once we've moved the
  //scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)        // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)        // $literal
     .addImm(0);       // $bank_swizzle

  return MIB;
}

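// OPERAND_CASE(Label) expands to a case that maps an operand name to its
// per-slot (X/Y/Z/W) variant for DOT_4-style vector instructions, e.g.
// src0 -> src0_X when Slot is 0.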
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}