//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;
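// AMDGPUGenDFAPacketizer.inc is generated by TableGen's DFA packetizer
// backend from the R600 scheduling itineraries; it supplies the
// createDFAPacketizer() implementation used by CreateTargetScheduleState()
// below.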
R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this)
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}
bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}
void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    // Copy a vec4 register channel by channel with four MOVs.
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    // Mixed vec4/scalar copies are not supported: at this point neither
    // register may be vec4.
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}
MachineInstr *R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                            unsigned DstReg, int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}
unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}
bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}
// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RESERVE_REG:
    return true;
  }
}
bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::DOT4_r600_pseudo:
  case AMDGPU::DOT4_eg_pseudo:
    return true;
  }
}
bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}
bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  // An instruction is an ALU instruction if it uses any of the OP1, OP2, or
  // OP3 (one-, two-, or three-source) ALU encodings.
  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}
DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}
static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}
static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  // Walk backwards from I and return the first predicate setter found.
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}
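// For reference, the shape of a predicated terminator sequence that the
// helpers above and AnalyzeBranch below expect is roughly (a sketch, not
// verbatim MIR):
//
//   %p = PRED_X ...                  ; predicate setter
//   JUMP <bb.true>, %PRED_SEL_ONE    ; predicated jump
//   JUMP <bb.false>                  ; optional unpredicated jump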
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (static_cast<MachineInstr *>(I)->getOpcode() != AMDGPU::JUMP) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      static_cast<MachineInstr *>(--I)->getOpcode() != AMDGPU::JUMP) {
    if (LastOpc == AMDGPU::JUMP) {
      if (!isPredicated(LastInst)) {
        // Unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        // Conditional branch: recover the condition from the predicate
        // setter that feeds it.
        MachineInstr *predSet = I;
        while (!isPredicateSetter(predSet->getOpcode())) {
          predSet = --I;
        }
        TBB = LastInst->getOperand(0).getMBB();
        Cond.push_back(predSet->getOperand(1));
        Cond.push_back(predSet->getOperand(2));
        Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
        return false;
      }
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a predicated JUMP followed by an unpredicated
  // JUMP, treat it as a conditional branch with an explicit false target.
  if (SecondLastOpc == AMDGPU::JUMP &&
      isPredicated(SecondLastInst) &&
      LastOpc == AMDGPU::JUMP &&
      !isPredicated(LastInst)) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
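// On a successful conditional-branch analysis, Cond holds three operands:
// Cond[0] is the predicate setter's register operand, Cond[1] its
// compare-opcode immediate (later consumed by InsertBranch and
// ReverseBranchCondition), and Cond[2] the PRED_SEL register.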
int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}
unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      // Unconditional branch.
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB).addReg(0);
      return 1;
    } else {
      // Conditional branch: flag the predicate setter with a push and copy
      // the condition's compare opcode into it.
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    // Conditional branch followed by an unconditional branch to the false
    // block.
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB).addReg(0);
    return 2;
  }
}
unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.
  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP:
    if (isPredicated(I)) {
      MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
      clearFlag(predSet, 0, MO_FLAG_PUSH);
    }
    I->eraseFromParent();
    break;
  }
  return 2;
}
bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}
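// Roughly speaking, PRED_SEL_ZERO and PRED_SEL_ONE predicate execution on
// the predicate being 0 or 1 respectively, while PREDICATE_BIT is the
// implicit predicate operand consumed by conditional JUMPs (see
// InsertBranch).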
bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so any instructions after them cannot be
  // predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.
  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}
// If-conversion hooks: these unconditionally report if-conversion as
// profitable and unpredication as unprofitable.

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}
bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  // Invert the compare opcode...
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true; // Can't reverse.
  }

  // ...and swap the predicate select register.
  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true; // Can't reverse.
  }

  return false;
}
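// Example: reversing the condition {predReg, OPCODE_IS_ZERO_INT,
// PRED_SEL_ONE} yields {predReg, OPCODE_IS_NOT_ZERO_INT, PRED_SEL_ZERO};
// both the compare opcode and the predicate select register are flipped.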
bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}
bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}
bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                      const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    // Rewrite the predicate operand to the condition's PRED_SEL register and
    // add an implicit use of PREDICATE_BIT.
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}
unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
                                    DstReg);  // $dst

  // Two-source instructions carry the extra exec-mask/predicate-update
  // operands.
  if (Src1Reg) {
    MIB.addImm(0)                  // $update_exec_mask
       .addImm(0);                 // $update_predicate
  }
  MIB.addImm(1)                    // $write
     .addImm(0)                    // $omod
     .addImm(0)                    // $dst_rel
     .addImm(0)                    // $dst_clamp
     .addReg(Src0Reg)              // $src0
     .addImm(0)                    // $src0_neg
     .addImm(0)                    // $src0_rel
     .addImm(0)                    // $src0_abs
     .addImm(-1);                  // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg)            // $src1
       .addImm(0)                  // $src1_neg
       .addImm(0)                  // $src1_rel
       .addImm(0)                  // $src1_abs
       .addImm(-1);                // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0);                   // $literal

  return MIB;
}
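// A minimal usage sketch (register variables are hypothetical): build a
// plain two-source ALU op, then adjust the generated operands via the flag
// helpers.
//
//   MachineInstr *Add =
//       buildDefaultInstruction(MBB, I, AMDGPU::ADD, DstReg, Src0, Src1);
//   addFlag(Add, 0, MO_FLAG_CLAMP); // clamp $dst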
MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}
int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}
int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  // Instructions without native operands use a fixed operand layout.
  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  // Natively encoded ALU instructions look the operand index up in a table
  // selected by the instruction's encoding group (OP1, OP2, or OP3).
  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
                                                 "for this instruction");
    OpTableIdx = 2;
  }

  return R600Operands::ALUOpTable[OpTableIdx][Op];
}
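// R600Operands::ALUOpTable is indexed as [encoding group][operand kind]:
// rows 0/1/2 correspond to the OP1/OP2/OP3 encodings, and each entry holds
// the machine-operand index for that operand kind (-1 marks an operand the
// encoding does not have, which callers assert against).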
void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}
//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;
    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;
    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    // Otherwise return the instruction's packed flag operand.
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}
void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}
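// For non-native instructions the per-operand flags are packed into a single
// immediate: operand N's flags occupy the bit range starting at
// NUM_MO_FLAGS * N. E.g. setting a flag on operand 1 ORs in
// (Flag << NUM_MO_FLAGS), and clearFlag() below masks the same bits off.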
void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}