//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm) { }

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//
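
// Physical register copies. Copies with no single-instruction move are
// expanded into one 32-bit move per subregister using the Sub0_* index tables
// below; 32-bit copies and 64-bit SGPR copies are emitted as a single
// S_MOV/V_MOV.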
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary.
      return;
    }
  }

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}
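
// SGPR spilling: instead of going through memory, a 32-bit SGPR value is
// written into a lane of the function's spill VGPR with V_WRITELANE_B32.
// Wider SGPR classes are broken into 32-bit pieces and spilled recursively,
// one frame index per piece.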
void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned KillFlag = isKill ? RegState::Kill : 0;

  if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
    unsigned Lane = MFI->SpillTracker.getNextLane(MRI);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
            MFI->SpillTracker.LaneVGPR)
            .addReg(SrcReg, KillFlag)
            .addImm(Lane);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
                                    Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, MI, MBB.findDebugLoc(MI), get(AMDGPU::COPY), SubReg)
              .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
      storeRegToStackSlot(MBB, MI, SubReg, isKill, FrameIndex + i,
                          &AMDGPU::SReg_32RegClass, TRI);
    }
  }
}
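
// Reloads mirror storeRegToStackSlot: a spilled SGPR is read back out of its
// VGPR lane with V_READLANE_B32, and wider classes are reassembled one 32-bit
// subregister at a time.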
void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  if (TRI->getCommonSubClass(RC, &AMDGPU::SReg_32RegClass)) {
    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);

    BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), DestReg)
            .addReg(Spill.VGPR)
            .addImm(Spill.Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned Flags = RegState::Define;
      if (i == 0)
        Flags |= RegState::Undef;

      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      loadRegFromStackSlot(MBB, MI, SubReg, FrameIndex + i,
                           &AMDGPU::SReg_32RegClass, TRI);
      BuildMI(MBB, MI, DL, get(AMDGPU::COPY))
              .addReg(DestReg, Flags, RI.getSubRegFromChannel(i))
              .addReg(SubReg);
    }
  }
}
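
// Commuting src0 and src1. Commuting is rejected for VOP2 when src0 is an
// SGPR, since after the swap the SGPR would land in src1, where it cannot be
// encoded. When src1 is an immediate the operands are swapped in place;
// otherwise the generic TargetInstrInfo implementation is used. In both cases
// the opcode is remapped with commuteOpcode().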
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
    return 0;

  // Cannot commute VOP2 if src0 is SGPR.
  if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
      RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
    return 0;

  if (!MI->getOperand(2).isReg()) {
    // XXX: Commute instructions with FPImm operands
    if (NewMI || MI->getOperand(2).isFPImm() ||
        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return 0;
    }

    // XXX: Commute VOP3 instructions with abs and neg set.
    if (isVOP3(MI->getOpcode()) &&
        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::abs)).getImm() ||
         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::neg)).getImm()))
      return 0;

    unsigned Reg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
    MI->getOperand(2).ChangeToRegister(Reg, false);
    MI->getOperand(2).setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
  }

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

bool
SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  switch(MI->getOpcode()) {
  default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
    return MI->getOperand(1).isImm();
  }
}

namespace AMDGPU {
// Helper function generated by tablegen. We are wrapping this with
// an SIInstrInfo function that returns bool rather than int.
int isDS(uint16_t Opcode);
} // end namespace AMDGPU

bool SIInstrInfo::isDS(uint16_t Opcode) const {
  return ::AMDGPU::isDS(Opcode) != -1;
}

int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}
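
// Immediate operands come in two flavors: inline constants (small integers in
// [-16, 64] and a handful of floating-point values), which are encoded
// directly in the source field, and 32-bit literal constants, which take an
// extra dword after the instruction and occupy the constant bus.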
bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int32_t Val = Imm.getSExtValue();
  if (Val >= -16 && Val <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  return (APInt::floatToBits(0.0f) == Imm) ||
         (APInt::floatToBits(1.0f) == Imm) ||
         (APInt::floatToBits(-1.0f) == Imm) ||
         (APInt::floatToBits(0.5f) == Imm) ||
         (APInt::floatToBits(-0.5f) == Imm) ||
         (APInt::floatToBits(2.0f) == Imm) ||
         (APInt::floatToBits(-2.0f) == Imm) ||
         (APInt::floatToBits(4.0f) == Imm) ||
         (APInt::floatToBits(-4.0f) == Imm);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm())
    return isInlineConstant(APInt(32, MO.getImm(), true));

  if (MO.isFPImm()) {
    APFloat FpImm = MO.getFPImm()->getValueAPF();
    return isInlineConstant(FpImm.bitcastToAPInt());
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}
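
// Machine verifier hook. Beyond the generic operand checks, this enforces the
// VALU encoding rules: a VOP* instruction may read at most one value over the
// constant bus (an SGPR, M0, VCC, EXEC, or a literal constant), and the VOP3
// encoding has no room for a literal constant at all.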
bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct.
  for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      break;
    case MCOI::OPERAND_IMMEDIATE:
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    int RegClass = Desc.OpInfo[i].RegClass;
    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus.
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
             (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
              AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC.
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm() || Src1.isFPImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

  // Verify VOP3.
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1)
    return MRI.getRegClass(MI.getOperand(OpNo).getReg());

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}
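
// Replaces an operand that is illegal in its current position by materializing
// or copying it into a fresh virtual register of the equivalent VGPR class and
// rewriting the operand to use that register instead.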
void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg()) {
    Opcode = AMDGPU::COPY;
  } else if (RI.isSGPRClass(RC)) {
    Opcode = AMDGPU::S_MOV_B32;
  }

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  unsigned Reg = MRI.createVirtualRegister(VRC);
  BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
          Reg).addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  assert(SuperReg.isReg());

  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          NewSuperReg)
          .addOperand(SuperReg);

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          SubReg)
          .addReg(NewSuperReg, 0, SubIdx);
  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}
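
// There is no VALU instruction that can materialize a 64-bit immediate, so a
// 64-bit immediate is split into two S_MOV_B32s whose results are glued back
// together with a REG_SEQUENCE. Both moves are pushed onto the worklist so
// they can themselves be moved to the VALU if necessary.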
unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
                                    MachineBasicBlock::iterator MI,
                                    MachineRegisterInfo &MRI,
                                    const TargetRegisterClass *RC,
                                    const MachineOperand &Op) const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned Dst = MRI.createVirtualRegister(RC);

  MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             LoDst)
                     .addImm(Op.getImm() & 0xFFFFFFFF);
  MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             HiDst)
                     .addImm(Op.getImm() >> 32);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
          .addReg(LoDst)
          .addImm(AMDGPU::sub0)
          .addReg(HiDst)
          .addImm(AMDGPU::sub1);

  Worklist.push_back(Lo);
  Worklist.push_back(Hi);

  return Dst;
}
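
// Rewrites the operands of MI so that they satisfy the encoding rules of its
// opcode: the VOP2/VOP3 SGPR and constant bus restrictions, matching register
// banks for REG_SEQUENCE and PHI operands, and the srsrc/vaddr requirements of
// the addr64 MUBUF form.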
void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  // Legalize VOP2
  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
    MachineOperand &Src0 = MI->getOperand(Src0Idx);
    MachineOperand &Src1 = MI->getOperand(Src1Idx);

    // If the instruction implicitly reads VCC, we can't have any SGPR
    // operands, so move any that we find to a VGPR.
    bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
    if (ReadsVCC && Src0.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
      legalizeOpWithMove(MI, Src0Idx);
      return;
    }

    if (ReadsVCC && Src1.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
      legalizeOpWithMove(MI, Src1Idx);
      return;
    }

    // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
    // be the first operand, and there can only be one.
    if (Src1.isImm() || Src1.isFPImm() ||
        (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
      if (MI->isCommutable()) {
        if (commuteInstruction(MI))
          return;
      }
      legalizeOpWithMove(MI, Src1Idx);
    }
  }

  // XXX - Do any VOP3 instructions read VCC?
  // Legalize VOP3
  if (isVOP3(MI->getOpcode())) {
    int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
    unsigned SGPRReg = AMDGPU::NoRegister;
    for (unsigned i = 0; i < 3; ++i) {
      int Idx = VOP3Idx[i];
      if (Idx == -1)
        continue;
      MachineOperand &MO = MI->getOperand(Idx);

      if (MO.isReg()) {
        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
          continue; // VGPRs are legal

        assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");

        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
          SGPRReg = MO.getReg();
          // We can use one SGPR in each VOP3 instruction.
          continue;
        }
      } else if (!isLiteralConstant(MO)) {
        // If it is not a register and not a literal constant, then it must be
        // an inline constant which is always legal.
        continue;
      }

      // If we make it this far, then the operand is not legal and we must
      // legalize it.
      legalizeOpWithMove(MI, Idx);
    }
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
      MI->getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be
    // VGPRs; otherwise we will create illegal VGPR->SGPR copies when
    // legalizing them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);
      MachineBasicBlock *InsertBB;
      MachineBasicBlock::iterator Insert;
      if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
        InsertBB = MI->getParent();
        Insert = MI;
      } else {
        // MI is a PHI instruction.
        InsertBB = MI->getOperand(i + 1).getMBB();
        Insert = InsertBB->getFirstTerminator();
      }
      BuildMI(*InsertBB, Insert, MI->getDebugLoc(),
              get(AMDGPU::COPY), DstReg)
              .addOperand(MI->getOperand(i));
      MI->getOperand(i).setReg(DstReg);
    }
  }

  // Legalize MUBUF* instructions
  // FIXME: If we start using the non-addr64 instructions for compute, we
  // may need to legalize them here.

  int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::srsrc);
  int VAddrIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::vaddr);
  if (SRsrcIdx != -1 && VAddrIdx != -1) {
    const TargetRegisterClass *VAddrRC =
        RI.getRegClass(get(MI->getOpcode()).OpInfo[VAddrIdx].RegClass);

    if (VAddrRC->getSize() == 8 &&
        MRI.getRegClass(MI->getOperand(SRsrcIdx).getReg()) != VAddrRC) {
      // We have a MUBUF instruction that uses a 64-bit vaddr register and
      // srsrc has the incorrect register class. In order to fix this, we
      // need to extract the pointer from the resource descriptor (srsrc),
      // add it to the value of vaddr, then store the result in the vaddr
      // operand. Then, we need to set the pointer field of the resource
      // descriptor to zero.

      MachineBasicBlock &MBB = *MI->getParent();
      MachineOperand &SRsrcOp = MI->getOperand(SRsrcIdx);
      MachineOperand &VAddrOp = MI->getOperand(VAddrIdx);
      unsigned SRsrcPtrLo, SRsrcPtrHi, VAddrLo, VAddrHi;
      unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);

      // SRsrcPtrLo = srsrc:sub0
      SRsrcPtrLo = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // SRsrcPtrHi = srsrc:sub1
      SRsrcPtrHi = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // VAddrLo = vaddr:sub0
      VAddrLo = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // VAddrHi = vaddr:sub1
      VAddrHi = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // NewVaddrLo = SRsrcPtrLo + VAddrLo
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
              NewVAddrLo)
              .addReg(SRsrcPtrLo)
              .addReg(VAddrLo)
              .addReg(AMDGPU::VCC, RegState::Define | RegState::Implicit);

      // NewVaddrHi = SRsrcPtrHi + VAddrHi
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
              NewVAddrHi)
              .addReg(SRsrcPtrHi)
              .addReg(VAddrHi)
              .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
              .addReg(AMDGPU::VCC, RegState::Implicit);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
              .addReg(NewVAddrLo)
              .addImm(AMDGPU::sub0)
              .addReg(NewVAddrHi)
              .addImm(AMDGPU::sub1);

      // Zero64 = 0
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
              Zero64)
              .addImm(0);

      // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatLo)
              .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);

      // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatHi)
              .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);

      // NewSRsrc = {Zero64, SRsrcFormat}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewSRsrc)
              .addReg(Zero64)
              .addImm(AMDGPU::sub0_sub1)
              .addReg(SRsrcFormatLo)
              .addImm(AMDGPU::sub2)
              .addReg(SRsrcFormatHi)
              .addImm(AMDGPU::sub3);

      // Update the instruction to use NewVaddr
      MI->getOperand(VAddrIdx).setReg(NewVAddr);
      // Update the instruction to use NewSRsrc
      MI->getOperand(SRsrcIdx).setReg(NewSRsrc);
    }
  }
}
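
// Moves a scalar (SALU) instruction, and transitively any of its users that
// cannot read a VGPR operand, over to equivalent vector (VALU) instructions.
// Works off a worklist seeded with TopInst; 64-bit scalar operations with no
// single VALU equivalent are first split into 32-bit halves.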
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst->getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst->getOpcode();

    // Handle some special cases
    switch (Opcode) {
    case AMDGPU::S_MOV_B64: {
      DebugLoc DL = Inst->getDebugLoc();

      // If the source operand is a register we can replace this with a
      // copy.
      if (Inst->getOperand(1).isReg()) {
        MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
                             .addOperand(Inst->getOperand(0))
                             .addOperand(Inst->getOperand(1));
        Worklist.push_back(Copy);
      } else {
        // Otherwise, we need to split this into two movs, because there is
        // no 64-bit VALU move instruction.
        unsigned Reg = Inst->getOperand(0).getReg();
        unsigned Dst = split64BitImm(Worklist,
                                     Inst,
                                     MRI,
                                     MRI.getRegClass(Reg),
                                     Inst->getOperand(1));
        MRI.replaceRegWith(Reg, Dst);
      }
      Inst->eraseFromParent();
      continue;
    }
    case AMDGPU::S_AND_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_XOR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitOp(Worklist, Inst, AMDGPU::S_NOT_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFE_I64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");
    }

    unsigned NewOpcode = getVALUOp(*Inst);
    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
      legalizeOperands(Inst);
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it,
    // and we're just about to add the implicit use / defs of VCC, so we don't
    // want both.
    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst->getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
        Inst->RemoveOperand(i);
    }

    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      // We are converting these to a BFE, so we need to add the missing
      // operands for the size and offset.
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(Size));

      // XXX - Other pointless operands. There are 4, but it seems you only need
      // 3 to not hit an assertion later in MCInstLower.
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    addDescImplicitUseDef(NewDesc, Inst);

    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
      const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
      // If we need to move this to VGPRs, we need to unpack the second operand
      // back into the 2 separate ones for bit offset and width.
      assert(OffsetWidthOp.isImm() &&
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t Imm = OffsetWidthOp.getImm();

      uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

      Inst->RemoveOperand(2); // Remove old immediate.
      Inst->addOperand(MachineOperand::CreateImm(Offset));
      Inst->addOperand(MachineOperand::CreateImm(BitWidth));

      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    // Update the destination register class.
    const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);

    switch (Opcode) {
      // For target instructions, getOpRegClass just returns the virtual
      // register class associated with the operand, so we need to find an
      // equivalent VGPR register class in order to move the instruction to the
      // VALU.
    case AMDGPU::COPY:
    case AMDGPU::PHI:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::INSERT_SUBREG:
      if (RI.hasVGPRs(NewDstRC))
        continue;
      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        continue;
      break;
    default:
      break;
    }

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    // Legalize the operands
    legalizeOperands(Inst);

    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
           E = MRI.use_end(); I != E; ++I) {
      MachineInstr &UseMI = *I->getParent();
      if (!canReadVGPR(UseMI, I.getOperandNo())) {
        Worklist.push_back(&UseMI);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VReg_32RegClass;
}
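
// Splits a 64-bit scalar operation (e.g. S_AND_B64) into two 32-bit operations
// on the sub0/sub1 halves of its operands and recombines the results with a
// REG_SEQUENCE. Both halves are queued on the worklist for further
// legalization.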
void SIInstrInfo::splitScalar64BitOp(SmallVectorImpl<MachineInstr *> &Worklist,
                                     MachineInstr *Inst,
                                     unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  MachineOperand &Src1 = Inst->getOperand(2);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
    .addOperand(SrcReg0Sub0)
    .addOperand(SrcReg1Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
    .addOperand(SrcReg0Sub1)
    .addOperand(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  Worklist.push_back(LoHalf);
  Worklist.push_back(HiHalf);
}

void SIInstrInfo::addDescImplicitUseDef(const MCInstrDesc &NewDesc,
                                        MachineInstr *Inst) const {
  // Add the implicit and explicit register definitions.
  if (NewDesc.ImplicitUses) {
    for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitUses[i];
      Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
    }
  }

  if (NewDesc.ImplicitDefs) {
    for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitDefs[i];
      Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }
}
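
// Indirect register writes and reads are emitted as SI_INDIRECT_DST_V1 and
// SI_INDIRECT_SRC pseudo instructions, which take the first register of the
// reserved indirect range (see reserveIndirectRegisters below) together with
// the dynamic offset register.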
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
          .addReg(IndirectBaseReg, RegState::Define)
          .addOperand(I->getOperand(0))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0)
          .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
          .addOperand(I->getOperand(0))
          .addOperand(I->getOperand(1))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0);
}
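
// Reserves the VGPR range [Begin, End] used for indirect addressing. The wider
// register classes start reserving N-1 registers earlier (e.g. Begin - 3 for
// the 128-bit class) so that any wide register overlapping the reserved range
// is reserved as well.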
void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}