//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenDFAPacketizer.inc"

R600InstrInfo::R600InstrInfo(const AMDGPUSubtarget &st)
  : AMDGPUInstrInfo(st),
    RI(st)
  { }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  unsigned VectorComponents = 0;
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg) &&
      AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    VectorComponents = 4;
  } else if (AMDGPU::R600_Reg64RegClass.contains(DestReg) &&
             AMDGPU::R600_Reg64RegClass.contains(SrcReg)) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
                              .addReg(DestReg,
                                      RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0))
                                    .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as place holders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1;
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const {
  if (isALUInstr(MI->getOpcode()))
    return true;
  if (isVector(*MI) || isCubeOp(MI->getOpcode()))
    return true;
  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::COPY:
  case AMDGPU::DOT_4:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == AMDGPU::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr *MI) const {
  return isVectorOnly(MI->getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return MFI->ShaderType != ShaderType::COMPUTE &&
         usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  const R600MachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<R600MachineFunctionInfo>();
  return (MFI->ShaderType == ShaderType::COMPUTE &&
          usesVertexCache(MI->getOpcode())) ||
         usesTextureCache(MI->getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::KILLGT:
  case AMDGPU::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterUseOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr *MI) const {
  return MI->findRegisterDefOperandIdx(AMDGPU::AR_X) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr *MI) const {
  if (!isALUInstr(MI->getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
                                        E = MI->operands_end(); I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (AMDGPU::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

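/// \returns the operand index of source number \p SrcNum (0-2) of \p Opcode,
/// or -1 if the instruction has no such source.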
int R600InstrInfo::getSrcIdx(unsigned Opcode, unsigned SrcNum) const {
  static const unsigned OpTable[] = {
    AMDGPU::OpName::src0,
    AMDGPU::OpName::src1,
    AMDGPU::OpName::src2
  };

  assert(SrcNum < 3);
  return getOperandIdx(Opcode, OpTable[SrcNum]);
}

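/// \returns the index of the "sel" operand that belongs to the source operand
/// at index \p SrcIdx of \p Opcode, or -1 if there is none.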
#define SRC_SEL_ROWS 11
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[SRC_SEL_ROWS][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
    {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
    {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
    {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
    {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
    {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
    {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
    {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
    {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}
  };

  for (unsigned i = 0; i < SRC_SEL_ROWS; ++i) {
    if (getOperandIdx(Opcode, SrcSelTable[i][0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, SrcSelTable[i][1]);
    }
  }
  return -1;
}
#undef SRC_SEL_ROWS

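/// Collect the source operands of \p MI. The second member of each pair is
/// the constant select for ALU_CONST sources, the literal value for
/// ALU_LITERAL_X sources, and 0 for plain registers.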
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr *MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X},
      {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y},
      {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z},
      {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W},
      {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X},
      {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y},
      {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z},
      {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                        OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == AMDGPU::ALU_CONST) {
        unsigned Sel = MI->getOperand(getOperandIdx(MI->getOpcode(),
                                                    OpTable[j][1])).getImm();
        Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel},
    {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel},
    {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI->getOperand(SrcIdx);
    unsigned Reg = MI->getOperand(SrcIdx).getReg();
    if (Reg == AMDGPU::ALU_CONST) {
      unsigned Sel = MI->getOperand(
          getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel));
      continue;
    }
    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned Imm = MI->getOperand(
          getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm();
      Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm));
      continue;
    }
    Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, 0));
  }
  return Result;
}

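/// Lower the sources of \p MI to (hw register index, channel) pairs used by
/// the bank swizzle checks below, counting constant reads in \p ConstCount.
/// Constant reads become dummy (-1, 0) pairs and PS/PV reads use index 255.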
std::vector<std::pair<int, unsigned> >
R600InstrInfo::ExtractSrcs(MachineInstr *MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned> > Result;
  unsigned i = 0;
  for (unsigned n = Srcs.size(); i < n; ++i) {
    unsigned Reg = Srcs[i].first->getReg();
    unsigned Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == AMDGPU::OQAP) {
      Result.push_back(std::pair<int, unsigned>(Index, 0));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to denote a PS/PV reg
      Result.push_back(std::pair<int, unsigned>(255, 0));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::pair<int, unsigned>(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

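/// Apply the bank swizzle \p Swz to the three (register, channel) sources in
/// \p Src, returning them in the cycle order the hardware reads them.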
static std::vector<std::pair<int, unsigned> >
Swizzle(std::vector<std::pair<int, unsigned> > Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

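/// \returns the hardware cycle in which trans-slot operand \p Op is read
/// under the swizzle \p Swz.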
static unsigned
getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
    return 0;
  }
}

/// \returns how many MIs (whose inputs are represented by IGSrcs) can be
/// packed in the same Instruction Group while meeting read port limitations
/// given a Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned> > &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(AMDGPU::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
            // The value from output queue A (denoted by register OQAP) can
            // only be fetched during the first cycle.
            return 0;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic terms) swizzle sequence, assuming that all swizzles
/// after Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx--;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible swizzle sequences to find one that meets all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned> > > &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned> > &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// An instruction in the Trans slot can't read a GPR at cycle 0 if it also
/// reads a constant, and can't read a GPR at cycle 1 if it reads two
/// constants.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned> > &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

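/// Check whether the instruction group \p IG can be scheduled while
/// respecting the GPR read port limitations; on success the chosen bank
/// swizzles are stored in \p ValidSwizzle.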
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans) const {
  // TODO: support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned> > > IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                AMDGPU::OpName::bank_swizzle);
    ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
                            IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned> > TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = IGSrcs.back();
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

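/// An instruction group may read at most two different 64-bit halves
/// (the index | half encodings in \p Consts) of the constant cache;
/// reject groups that need more.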
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instruction group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr *MI = MIs[i];
    if (!isALUInstr(MI->getOpcode()))
      continue;

    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
        getSrcs(MI);

    for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
      std::pair<MachineOperand *, unsigned> Src = Srcs[j];
      if (Src.first->getReg() == AMDGPU::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == AMDGPU::ALU_CONST)
        Consts.push_back(Src.second);
      if (AMDGPU::R600_KC0RegClass.contains(Src.first->getReg()) ||
          AMDGPU::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
                                                    const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == AMDGPU::BRANCH || Opcode == AMDGPU::BRANCH_COND_i32 ||
         Opcode == AMDGPU::BRANCH_COND_f32;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  // AMDGPU::BRANCH* instructions are only available after isel and are not
  // handled here.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Remove successive JUMP
  while (I != MBB.begin() && std::prev(I)->getOpcode() == AMDGPU::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  }
}

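/// \returns an iterator to the last CF_ALU or CF_ALU_PUSH_BEFORE instruction
/// of \p MBB, or MBB.end() if there is none.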
static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == AMDGPU::CF_ALU ||
        It->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
      return std::prev(It.base());
  }
  return MBB.end();
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
             .addMBB(TBB)
             .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
      CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
            .addMBB(TBB)
            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU);
    CfAlu->setDesc(get(AMDGPU::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.
  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(AMDGPU::CF_ALU));
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (MI->getOpcode() == AMDGPU::CF_ALU) {
    // If the clause starts in the middle of the MBB, then the MBB contains
    // more than one clause, and we cannot predicate several clauses at once.
    if (MI->getParent()->begin() != MachineBasicBlock::iterator(MI))
      return false;
    // TODO: We don't support KC merging atm
    if (MI->getOperand(3).getImm() != 0 || MI->getOperand(4).getImm() != 0)
      return false;
    return true;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }

  return false;
}

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}

bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}

bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                      const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (MI->getOpcode() == AMDGPU::CF_ALU) {
    MI->getOperand(8).setImm(0);
    return true;
  }

  if (MI->getOpcode() == AMDGPU::DOT_4) {
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI->getOperand(getOperandIdx(*MI, AMDGPU::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr *) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(
          MF.getTarget().getFrameLowering());

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Reserved.set(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Reserved.set(Reg);
    }
  }
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      AddrReg, ValueReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, AMDGPU::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                      ValueReg,
                                      AddrReg)
                                      .addReg(AMDGPU::AR_X,
                                              RegState::Implicit | RegState::Kill);
  setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1);

  return Mov;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

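/// Build an ALU instruction with all modifier operands (write, rel, clamp,
/// neg, abs, sel, ...) added and set to neutral default values, so callers
/// only have to adjust the operands they care about.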
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);           // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  //XXX: The r600g finalizer expects this to be 1, once we've moved the
  //scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)        // $last
      .addReg(AMDGPU::PRED_SEL_OFF) // $pred_sel
      .addImm(0)         // $literal
      .addImm(0);        // $bank_swizzle

  return MIB;
}

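// Maps a per-slot operand name of the DOT_4 pseudo instruction to its
// _X/_Y/_Z/_W variant for the slot selected below.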
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(AMDGPU::OpName::update_exec_mask)
  OPERAND_CASE(AMDGPU::OpName::update_pred)
  OPERAND_CASE(AMDGPU::OpName::write)
  OPERAND_CASE(AMDGPU::OpName::omod)
  OPERAND_CASE(AMDGPU::OpName::dst_rel)
  OPERAND_CASE(AMDGPU::OpName::clamp)
  OPERAND_CASE(AMDGPU::OpName::src0)
  OPERAND_CASE(AMDGPU::OpName::src0_neg)
  OPERAND_CASE(AMDGPU::OpName::src0_rel)
  OPERAND_CASE(AMDGPU::OpName::src0_abs)
  OPERAND_CASE(AMDGPU::OpName::src0_sel)
  OPERAND_CASE(AMDGPU::OpName::src1)
  OPERAND_CASE(AMDGPU::OpName::src1_neg)
  OPERAND_CASE(AMDGPU::OpName::src1_rel)
  OPERAND_CASE(AMDGPU::OpName::src1_abs)
  OPERAND_CASE(AMDGPU::OpName::src1_sel)
  OPERAND_CASE(AMDGPU::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

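/// Expand slot \p Slot of the DOT_4 pseudo instruction \p MI into a single
/// DOT4 instruction writing \p DstReg, copying all per-slot operands over.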
MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == AMDGPU::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = AMDGPU::DOT4_r600;
  else
    Opcode = AMDGPU::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    AMDGPU::OpName::update_exec_mask,
    AMDGPU::OpName::update_pred,
    AMDGPU::OpName::write,
    AMDGPU::OpName::omod,
    AMDGPU::OpName::dst_rel,
    AMDGPU::OpName::clamp,
    AMDGPU::OpName::src0_neg,
    AMDGPU::OpName::src0_rel,
    AMDGPU::OpName::src0_abs,
    AMDGPU::OpName::src0_sel,
    AMDGPU::OpName::src1_neg,
    AMDGPU::OpName::src1_rel,
    AMDGPU::OpName::src1_abs,
    AMDGPU::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(AMDGPU::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, AMDGPU::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, AMDGPU::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, AMDGPU::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return AMDGPU::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

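/// \returns the operand of \p MI that holds \p Flag for source \p SrcIdx.
/// With the default \p Flag of 0 it returns the instruction's packed flag
/// operand instead.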
MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break;
      case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break;
      case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}