//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"

#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this),
    ST(tm.getSubtarget<AMDGPUSubtarget>())
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    // At this point neither register can be vec4: we can't copy vec4
    // registers with a single MOV.
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}

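// Illustrative sketch (assumed register names, not part of the build): a
// 128-bit copy such as T1.XYZW <- T0.XYZW is expanded above into four
// per-channel moves, each also carrying an implicit def of the vec4
// destination so liveness stays correct:
//   MOV T1.X, T0.X
//   MOV T1.Y, T0.Y
//   MOV T1.Z, T0.Z
//   MOV T1.W, T0.W
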
MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                             unsigned DstReg, int64_t Imm) const {
  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch(Opcode) {
    default: return false;
    case AMDGPU::DOT4_r600_pseudo:
    case AMDGPU::DOT4_eg_pseudo:
      return true;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
    default: return false;
    case AMDGPU::CUBE_r600_pseudo:
    case AMDGPU::CUBE_r600_real:
    case AMDGPU::CUBE_eg_pseudo:
    case AMDGPU::CUBE_eg_real:
      return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST;
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return MFI->ShaderType != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST) ||
         (get(Opcode).TSFlags & R600_InstFlag::TEX_INST);
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return (MFI->ShaderType == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert (Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

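// Worked example of the limit above, assuming the (Index << 2) | Chan
// encoding produced by canBundle() below: each entry collapses to a 64-bit
// "half" key (Index << 2) | (Chan & 2), and a group may address at most two
// distinct halves.
//   KC0[0].x -> key 0x0, KC0[0].y -> key 0x0   (same half, OK)
//   KC0[0].z -> key 0x2                        (second half, still OK)
//   KC0[1].x -> key 0x4                        (third half, group rejected)
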
bool
R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const {
  std::vector<unsigned> Consts;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    const MachineInstr *MI = MIs[i];

    const R600Operands::Ops OpTable[3][2] = {
      {R600Operands::SRC0, R600Operands::SRC0_SEL},
      {R600Operands::SRC1, R600Operands::SRC1_SEL},
      {R600Operands::SRC2, R600Operands::SRC2_SEL},
    };

    if (!isALUInstr(MI->getOpcode()))
      continue;

    for (unsigned j = 0; j < 3; j++) {
      int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
      if (SrcIdx < 0)
        break;
      unsigned Reg = MI->getOperand(SrcIdx).getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Const = MI->getOperand(
            getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
        Consts.push_back(Const);
        continue;
      }
      if (AMDGPU::R600_KC0RegClass.contains(Reg) ||
          AMDGPU::R600_KC1RegClass.contains(Reg)) {
        unsigned Index = RI.getEncodingValue(Reg) & 0xff;
        unsigned Chan = RI.getHWRegChan(Reg);
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

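// Usage sketch (hypothetical group, assuming KC0[N] encodes to index N):
// canBundle() collects one encoded constant per kcache source, so a group
// like
//   ADD T0.X, KC0[1].x, KC0[1].y
//   MUL T0.Y, KC0[2].x, T0.Z
// yields Consts = {0x4, 0x5, 0x8}, which fitsConstReadLimitations()
// accepts (only the two halves 0x4 and 0x8 are read).
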
DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  return Opcode == AMDGPU::PRED_X;
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a JUMP_COND followed by a JUMP, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

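// For reference, a sketch of the Cond encoding used here: on success for a
// conditional branch, Cond holds three operands taken from the predicate
// setter (PRED_X): Cond[0] is the value operand being tested, Cond[1] the
// comparison opcode immediate (e.g. OPCODE_IS_ZERO_INT), and Cond[2] a
// PRED_SEL register giving the predicate polarity. InsertBranch() and
// ReverseBranchCondition() below consume exactly this layout.
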
int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
           .addMBB(TBB)
           .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave the PRED* instructions in place.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

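// Example of a round trip through ReverseBranchCondition(): a condition
// {v, OPCODE_IS_ZERO_INT, PRED_SEL_ONE} becomes
// {v, OPCODE_IS_NOT_ZERO_INT, PRED_SEL_ZERO}. Both the comparison opcode
// and the predicate polarity flip together, so the reversed branch tests
// the exact logical negation of the original.
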
bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
                 static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

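// Illustrative numbers (assumed indices): with getIndirectIndexBegin() == 2,
// End == 3, and a stack width of 1, this reserves the vec4 super-registers
// at indices 2 and 3 plus the 32-bit T-registers at 4*2+0 and 4*3+0, so the
// register allocator keeps those slots free for indirect addressing.
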
unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass * R600InstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::DST_REL, 1);
  return Mov;
}

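// Rough shape of the sequence emitted above (operand names simplified):
//   MOVA_INT_eg AR.x, OffsetReg   ; write disabled, only loads AR
//   MOV Addr[Address], ValueReg   ; dst_rel = 1, so AR.x offsets the dst
// buildIndirectRead() below mirrors this with src0_rel set on the load side.
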
MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg, AddrReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::SRC0_REL, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)      // $update_exec_mask
       .addImm(0);     // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)        // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0)        // $literal
     .addImm(0);       // $bank_swizzle

  return MIB;
}

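// Minimal usage sketch (assumed registers): a plain register-to-register
// move with all default modifiers is created with
//   buildDefaultInstruction(MBB, I, AMDGPU::MOV, DstReg, SrcReg);
// buildMovImm() below uses the same helper with ALU_LITERAL_X as src0 and
// then patches the literal operand.
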
MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
                                                 "for this instruction");
    OpTableIdx = 2;
  }

  return R600Operands::ALUOpTable[OpTableIdx][Op];
}

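// Example (hypothetical OP2 instruction): for an opcode whose TSFlags have
// OP2 set, getOperandIdx(Opcode, R600Operands::SRC1_NEG) returns
// R600Operands::ALUOpTable[1][R600Operands::SRC1_NEG], i.e. the machine
// operand slot holding the src1 negation bit for that encoding.
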
void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

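// For non-native encodings each operand owns NUM_MO_FLAGS bits inside the
// single flag operand, so the update above is plain bit packing, roughly:
//   unsigned Imm = FlagOp.getImm();
//   Imm |=  Flag << (NUM_MO_FLAGS * Operand);   // addFlag sets the bits
//   Imm &= ~(Flag << (NUM_MO_FLAGS * Operand)); // clearFlag undoes them
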
void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}