//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm),
    ST(tm.getSubtarget<AMDGPUSubtarget>())
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

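// copyPhysReg has no single MOV that covers a 128-bit register: a vec4 copy
// is expanded into four per-channel MOVs, one per sub-register, each of which
// implicitly defines the full destination so liveness of the super-register
// is preserved.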
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    // We can't copy vec4 registers
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}

MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                             unsigned DstReg, int64_t Imm) const {
  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch(Opcode) {
    default: return false;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
    default: return false;
    case AMDGPU::CUBE_r600_pseudo:
    case AMDGPU::CUBE_r600_real:
    case AMDGPU::CUBE_eg_pseudo:
    case AMDGPU::CUBE_eg_real:
      return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return MFI->ShaderType != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return (MFI->ShaderType == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

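// getSrcs returns the explicit sources of \p MI as (operand, value) pairs:
// constant reads (ALU_CONST) are paired with their SRCx_SEL immediate,
// inline literals (ALU_LITERAL_X) with the IMM operand's value, and plain
// register reads with 0.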
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  static const R600Operands::Ops OpTable[3][2] = {
    {R600Operands::SRC0, R600Operands::SRC0_SEL},
    {R600Operands::SRC1, R600Operands::SRC1_SEL},
    {R600Operands::SRC2, R600Operands::SRC2_SEL},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), R600Operands::IMM)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

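// ExtractSrcs normalizes each source of \p MI to an (index, channel) pair
// for the read-port checks below.  Sources that should be ignored by the
// swizzle legality check -- encodings above 127 and values forwarded through
// the previous-vector map \p PV -- are replaced by the (-1, 0) dummy pair.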
std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV)
    const {
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    unsigned Chan = RI.getHWRegChan(Reg);
    if (Index > 127) {
      Result.push_back(DummyPair);
      continue;
    }
    if (PV.find(Index) != PV.end()) {
      Result.push_back(DummyPair);
      continue;
    }
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

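// Each BankSwizzle value names a permutation of the three source slots; the
// digits of ALU_VEC_<d0d1d2> appear to give, for each original source i, the
// slot it is moved to (e.g. ALU_VEC_021 keeps src0 in place and exchanges
// src1 and src2).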
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012:
    break;
  case R600InstrInfo::ALU_VEC_021:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

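// Checks the first CheckedSize instructions of the group under the swizzles
// in Swz: for every source slot j, all reads going through channel
// Src.second must target a single register index, which is what the
// Vector[channel][slot] table below tracks.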
static bool
isLegal(const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
        const std::vector<R600InstrInfo::BankSwizzle> &Swz,
        unsigned CheckedSize) {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0; i < CheckedSize; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0)
        continue;
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return false;
    }
  }
  return true;
}

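// Backtracking search: try each remaining bank-swizzle value for the
// instruction at Depth and recurse while the prefix stays legal.  On failure
// the slot is reset to ALU_VEC_012 so the caller sees a clean candidate
// vector.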
static bool recursiveFitsFPLimitation(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Depth = 0) {
  if (!isLegal(IGSrcs, SwzCandidate, Depth))
    return false;
  if (IGSrcs.size() == Depth)
    return true;
  unsigned i = SwzCandidate[Depth];
  for (; i < 6; i++) {
    SwzCandidate[Depth] = (R600InstrInfo::BankSwizzle) i;
    if (recursiveFitsFPLimitation(IGSrcs, SwzCandidate, Depth + 1))
      return true;
  }
  SwzCandidate[Depth] = R600InstrInfo::ALU_VEC_012;
  return false;
}

bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle)
    const {
  // TODO: support shared src0 - src1 operand
  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                R600Operands::BANK_SWIZZLE);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
                            IG[i]->getOperand(Op).getImm());
  }
  bool Result = recursiveFitsFPLimitation(IGSrcs, ValidSwizzle);
  return Result;
}

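// An instruction group may read from at most two distinct "half const"
// lines (Pair1/Pair2 below).  Each entry of Consts encodes
// (Index << 2) | Chan (see canBundle), so masking with ~3 and keeping bit 1
// collapses the channel reads of a constant onto one key per half line.
// Worked example with hypothetical values: entries 0x00 and 0x01 share key
// 0x00 while 0x06 maps to key 0x06; two distinct keys, so the group fits.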
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const {
  std::vector<unsigned> Consts;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      std::pair<MachineOperand *, int64_t> Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

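// AnalyzeBranch recognizes two terminator shapes: a lone unconditional JUMP,
// and a JUMP_COND (optionally followed by a JUMP to the false block) whose
// predicate comes from the nearest preceding predicate setter.  The
// condition is returned as three operands: two operands of the setter plus
// a PRED_SEL_ONE register.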
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

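// InsertBranch and ReverseBranchCondition rely on the Cond layout produced
// by AnalyzeBranch above: Cond[1] is the comparison-opcode immediate that is
// written back into the predicate setter, and Cond[2] is the PRED_SEL_*
// register selecting the predicate polarity the branch consumes.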
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }

  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                           const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg =
          AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass * R600InstrInfo::getIndirectAddrStoreRegClass(
                                                   unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

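// Indirect accesses are emitted as a two-instruction sequence: MOVA_INT_eg
// first loads the offset register into the address register AR_X (with its
// own result write disabled), then a MOV with the relative-addressing bit
// set ($dst_rel for writes, $src0_rel for reads) performs the access.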
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::DST_REL, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg, AddrReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::SRC0_REL, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

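// buildDefaultInstruction fills in every operand of a native-encoding ALU
// instruction with neutral defaults (no modifiers, write enabled, $last set
// for the r600g finalizer), so callers only provide the registers.  A
// minimal sketch of its use, as in copyPhysReg above:
//
//   MachineInstrBuilder MIB =
//       buildDefaultInstruction(MBB, I, AMDGPU::MOV, DstReg, SrcReg);
//
// Further operands added through MIB append after this default layout.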
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);             // $dst

  if (Src1Reg) {
    MIB.addImm(0)        // $update_exec_mask
       .addImm(0);       // $update_predicate
  }
  MIB.addImm(1)          // $write
     .addImm(0)          // $omod
     .addImm(0)          // $dst_rel
     .addImm(0)          // $dst_clamp
     .addReg(Src0Reg)    // $src0
     .addImm(0)          // $src0_neg
     .addImm(0)          // $src0_rel
     .addImm(0)          // $src0_abs
     .addImm(-1);        // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg)  // $src1
       .addImm(0)        // $src1_neg
       .addImm(0)        // $src1_rel
       .addImm(0)        // $src1_abs
       .addImm(-1);      // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1, once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)          // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)          // $literal
     .addImm(0);         // $bank_swizzle

  return MIB;
}

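// OPERAND_CASE maps a scalar operand enumerator onto its per-slot vector
// variant by token-pasting the slot suffix; e.g. R600Operands::SRC0 with
// Slot == 1 resolves to R600Operands::SRC0_Y.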
#define OPERAND_CASE(Label) \
  case Label: { \
    static const R600Operands::VecOps Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static R600Operands::VecOps
getSlotedOps(R600Operands::Ops Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(R600Operands::UPDATE_EXEC_MASK)
  OPERAND_CASE(R600Operands::UPDATE_PREDICATE)
  OPERAND_CASE(R600Operands::WRITE)
  OPERAND_CASE(R600Operands::OMOD)
  OPERAND_CASE(R600Operands::DST_REL)
  OPERAND_CASE(R600Operands::CLAMP)
  OPERAND_CASE(R600Operands::SRC0)
  OPERAND_CASE(R600Operands::SRC0_NEG)
  OPERAND_CASE(R600Operands::SRC0_REL)
  OPERAND_CASE(R600Operands::SRC0_ABS)
  OPERAND_CASE(R600Operands::SRC0_SEL)
  OPERAND_CASE(R600Operands::SRC1)
  OPERAND_CASE(R600Operands::SRC1_NEG)
  OPERAND_CASE(R600Operands::SRC1_REL)
  OPERAND_CASE(R600Operands::SRC1_ABS)
  OPERAND_CASE(R600Operands::SRC1_SEL)
  OPERAND_CASE(R600Operands::PRED_SEL)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

static int
getVecOperandIdx(R600Operands::VecOps Op) {
  return 1 + Op;
}

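// Splits one slot out of a DOT_4 pseudo instruction: the slot's sources are
// located via getSlotedOps/getVecOperandIdx, a standalone DOT4_r600 or
// DOT4_eg instruction is built with default operands, and the 14 per-slot
// modifier immediates are copied across.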
MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getVecOperandIdx(getSlotedOps(R600Operands::SRC0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getVecOperandIdx(getSlotedOps(R600Operands::SRC1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const R600Operands::Ops Operands[14] = {
    R600Operands::UPDATE_EXEC_MASK,
    R600Operands::UPDATE_PREDICATE,
    R600Operands::WRITE,
    R600Operands::OMOD,
    R600Operands::DST_REL,
    R600Operands::CLAMP,
    R600Operands::SRC0_NEG,
    R600Operands::SRC0_REL,
    R600Operands::SRC0_ABS,
    R600Operands::SRC0_SEL,
    R600Operands::SRC1_NEG,
    R600Operands::SRC1_REL,
    R600Operands::SRC1_ABS,
    R600Operands::SRC1_SEL,
  };

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getVecOperandIdx(getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

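// For instructions without native operand encoding the operand order is
// fixed (DST, SRC0, SRC1, SRC2); native OP1/OP2/OP3 instructions instead go
// through the R600Operands::ALUOpTable lookup, which maps a logical operand
// to its position for that encoding class.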
int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
           "for this instruction");
    OpTableIdx = 2;
  }

  return R600Operands::ALUOpTable[OpTableIdx][Op];
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

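// Two flag representations coexist: instructions with native operands keep
// each flag (clamp, write, last, neg, abs) in its own immediate operand,
// located by getFlagOp below, while all other instructions pack their flags
// into the single operand found by GET_FLAG_OPERAND_IDX, using a
// NUM_MO_FLAGS-bit field per operand.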
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;
    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;
    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}