//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

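// GET_INSTRINFO_CTOR asks the TableGen-generated headers to emit the
// InstrInfo constructor support; AMDGPUGenDFAPacketizer.inc provides the DFA
// tables consumed by CreateTargetScheduleState() below.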
#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm, *this),
    ST(tm.getSubtarget<AMDGPUSubtarget>())
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    // We can't copy vec4 registers
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
                                    .setIsKill(KillSrc);
  }
}

MachineInstr * R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                             unsigned DstReg, int64_t Imm) const {
  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::DOT4_r600_pseudo:
  case AMDGPU::DOT4_eg_pseudo:
    return true;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST;
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  return usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST) ||
         (get(Opcode).TSFlags & R600_InstFlag::TEX_INST);
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  return usesTextureCache(MI->getOpcode());
}

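// An ALU instruction group can only read from a limited number of distinct
// constant-cache lines. fitsConstReadLimitations() models this as at most two
// distinct "half constant" addresses per group: each Consts entry packs a
// constant address, and masking with ~3 (line index) plus bit 1 (half select)
// recovers the address that is compared against the two pair slots.
// canBundle() below collects the ALU_CONST selects of a candidate group and
// applies this check.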
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instruction group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const {
  std::vector<unsigned> Consts;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    const MachineInstr *MI = MIs[i];

    const R600Operands::Ops OpTable[3][2] = {
      {R600Operands::SRC0, R600Operands::SRC0_SEL},
      {R600Operands::SRC1, R600Operands::SRC1_SEL},
      {R600Operands::SRC2, R600Operands::SRC2_SEL},
    };

    if (!isALUInstr(MI->getOpcode()))
      continue;

    for (unsigned j = 0; j < 3; j++) {
      int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
      if (SrcIdx < 0)
        break;
      if (MI->getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST) {
        unsigned Const = MI->getOperand(
            getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
        Consts.push_back(Const);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
                                        const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

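// AnalyzeBranch recognizes blocks ending in JUMP and/or JUMP_COND. For a
// conditional branch, the condition is returned as three operands taken from
// the matching PRED_X setter: Cond[0] and Cond[1] are the PRED_X output and
// compare-kind operands, and Cond[2] names the PRED_SEL register the jump
// reads. InsertBranch and ReverseBranchCondition below depend on this layout.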
bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a conditional (JUMP_COND) and an unconditional
  // (JUMP) branch, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    return 2;
  }
}

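// RemoveBranch erases at most two terminators (a JUMP_COND and/or a JUMP)
// from the end of the block and returns the number removed. For conditional
// jumps, the MO_FLAG_PUSH set by InsertBranch is cleared on the matching
// predicate setter, but the PRED_X itself is kept (see the note inside).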
unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
  // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in
  // the backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

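// Reversing a condition produced by AnalyzeBranch means flipping the PRED_X
// compare kind (zero <-> not-zero) and swapping the predicate select register
// (PRED_SEL_ZERO <-> PRED_SEL_ONE). Following the usual TargetInstrInfo
// convention, true is returned when the condition cannot be reversed.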
bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                       const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                      const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

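// Indirect addressing uses a contiguous range of register indices.
// getIndirectIndexBegin() returns the first usable index (one past the
// highest live-in register) and getIndirectIndexEnd() the last one, derived
// from the frame layout; both return -1 when the function has no stack
// objects.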
int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
                                            LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
                 static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass * R600InstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

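// Indirect accesses are emitted as a two instruction sequence: a MOVA_INT
// loads the offset into the AR_X address register, then a MOV marked relative
// (DST_REL or SRC0_REL set to 1) has AR_X added to its destination or source
// index by the hardware. Roughly, for a write:
//   MOVA_INT AR.x, OffsetReg
//   MOV      AddrReg [+ AR.x], ValueReg    ; DST_REL = 1
// buildIndirectRead() mirrors this with SRC0_REL on the source operand.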
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::DST_REL, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit);
  setImmOperand(Mov, R600Operands::SRC0_REL, 1);

  return Mov;
}

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

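// buildDefaultInstruction creates a native-encoding ALU instruction with
// every modifier operand set to a neutral default (no neg/abs/rel, write
// enabled, source selects at -1). The operand order here has to stay in sync
// with the ALU operand layout in the .td definitions; the
// $update_exec_mask/$update_predicate pair only exists on two-source (OP2)
// instructions, which is why it is emitted only when Src1Reg is set.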
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)      // $update_exec_mask
       .addImm(0);     // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
     .addImm(0);                   // $literal

  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

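// getOperandIdx maps an abstract R600Operands value to the concrete operand
// index of a given opcode. Instructions without native operand encoding only
// expose DST/SRC0..SRC2 at fixed positions; native ALU instructions go
// through R600Operands::ALUOpTable, selected by the OP1/OP2/OP3 flag.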
int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
                                                 "for this instruction");
    OpTableIdx = 2;
  }

  return R600Operands::ALUOpTable[OpTableIdx][Op];
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

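// Two flag encodings coexist. Instructions with native operands model each
// modifier (clamp, write, last, neg, abs) as a dedicated immediate operand,
// which getFlagOp() resolves from Flag and SrcIdx. All other instructions
// carry one packed flag operand in which every source operand owns a
// NUM_MO_FLAGS wide bitfield; addFlag()/clearFlag() shift into that field.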
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;
    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;
    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}