//===-- SIInstrInfo.cpp - SI Instruction Information  --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm) { }

const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
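
  // Sub-register index lists, 0-terminated, used below to split wide copies
  // into a sequence of 32-bit moves.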
  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }
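
  // 32-bit scalar, 64-bit scalar, and 32-bit vector copies are single
  // instructions; every wider class picks a move opcode plus one of the
  // sub-index lists above and is expanded in the loop at the end.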
  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
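
    // On every sub-move except the last, implicitly define the full
    // destination register to keep the wide register's liveness connected.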
    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned KillFlag = isKill ? RegState::Kill : 0;
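
  // A 32-bit SGPR is spilled into one lane of a VGPR set aside for spilling
  // (V_WRITELANE_B32), and the SpillTracker remembers which VGPR/lane pair
  // holds each frame index. Wider classes are split into 32-bit pieces and
  // spilled recursively, one frame index per channel.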
  if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
    unsigned Lane = MFI->SpillTracker.getNextLane(MRI);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
            MFI->SpillTracker.LaneVGPR)
            .addReg(SrcReg, KillFlag)
            .addImm(Lane);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
                                    Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(MBB, MI, MBB.findDebugLoc(MI), get(AMDGPU::COPY), SubReg)
              .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
      storeRegToStackSlot(MBB, MI, SubReg, isKill, FrameIndex + i,
                          &AMDGPU::SReg_32RegClass, TRI);
    }
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
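
  // Reloads mirror the spill path: SGPRs are read back from the recorded
  // VGPR lane with V_READLANE_B32; wider classes are reassembled one 32-bit
  // channel at a time into the subregisters of DestReg.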
  if (TRI->getCommonSubClass(RC, &AMDGPU::SReg_32RegClass)) {
    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);
    assert(Spill.VGPR);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), DestReg)
            .addReg(Spill.VGPR)
            .addImm(Spill.Lane);
  } else {
    for (unsigned i = 0, e = RC->getSize() / 4; i != e; ++i) {
      unsigned Flags = RegState::Define;
      if (i == 0) {
        Flags |= RegState::Undef;
      }
      unsigned SubReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      loadRegFromStackSlot(MBB, MI, SubReg, FrameIndex + i,
                           &AMDGPU::SReg_32RegClass, TRI);
      BuildMI(MBB, MI, DL, get(AMDGPU::COPY))
              .addReg(DestReg, Flags, RI.getSubRegFromChannel(i))
              .addReg(SubReg);
    }
  }
}

MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
    return 0;
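
  // Note: VOP2 encodings accept an SGPR only as src0, so commuting an
  // instruction whose src0 is already an SGPR would leave an illegal SGPR
  // in src1.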
  // Cannot commute VOP2 if src0 is SGPR.
  if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
      RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
    return 0;

  if (!MI->getOperand(2).isReg()) {
    // XXX: Commute instructions with FPImm operands
    if (NewMI || MI->getOperand(2).isFPImm() ||
       (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return 0;
    }

    // XXX: Commute VOP3 instructions with abs and neg set.
    if (isVOP3(MI->getOpcode()) &&
        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::abs)).getImm() ||
         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::neg)).getImm()))
      return 0;
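
    // Operand 2 is an immediate: TargetInstrInfo::commuteInstruction only
    // swaps register operands, so exchange the register and the immediate
    // by hand.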
    unsigned Reg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
    MI->getOperand(2).ChangeToRegister(Reg, false);
    MI->getOperand(2).setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
  }

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

namespace llvm {
namespace AMDGPU {
// Helper function generated by tablegen. We are wrapping this with
// an SIInstrInfo function that returns bool rather than int.
int isDS(uint16_t Opcode);
}
}

bool SIInstrInfo::isDS(uint16_t Opcode) const {
  return ::AMDGPU::isDS(Opcode) != -1;
}

int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}
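
// Inline constants are operands the hardware encodes directly in the
// instruction word: small integers in [-16, 64] and a fixed set of
// floating-point values. Anything else must be emitted as a separate
// 32-bit literal dword.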
bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm()) {
    return MO.getImm() >= -16 && MO.getImm() <= 64;
  }

  if (MO.isFPImm()) {
    return MO.getFPImm()->isExactlyValue(0.0) ||
           MO.getFPImm()->isExactlyValue(0.5) ||
           MO.getFPImm()->isExactlyValue(-0.5) ||
           MO.getFPImm()->isExactlyValue(1.0) ||
           MO.getFPImm()->isExactlyValue(-1.0) ||
           MO.getFPImm()->isExactlyValue(2.0) ||
           MO.getFPImm()->isExactlyValue(-2.0) ||
           MO.getFPImm()->isExactlyValue(4.0) ||
           MO.getFPImm()->isExactlyValue(-4.0);
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
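
  // Verify VOP*: a VALU instruction may read at most one value over the
  // scalar constant bus (an SGPR, M0, VCC, EXEC, or a literal constant),
  // so count the distinct users and reject a second one.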
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
            (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
             AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm() || Src1.isFPImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

  // Verify VOP3
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1)
    return MRI.getRegClass(MI.getOperand(OpNo).getReg());

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}
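
// Legalize a single operand by moving it into a fresh virtual register of
// the equivalent VGPR class, using a plain COPY for register operands and a
// move-immediate otherwise, then rewriting the operand to that register.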
void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg()) {
    Opcode = AMDGPU::COPY;
  } else if (RI.isSGPRClass(RC)) {
    Opcode = AMDGPU::S_MOV_B32;
  }

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  unsigned Reg = MRI.createVirtualRegister(VRC);
  BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
          Reg).addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);
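
  // Legalize VOP2: src1 must be a VGPR, and when the instruction implicitly
  // reads VCC no SGPR operand is allowed at all, so commute or copy operands
  // into VGPRs until the encoding is satisfiable.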
  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
    MachineOperand &Src0 = MI->getOperand(Src0Idx);
    MachineOperand &Src1 = MI->getOperand(Src1Idx);

    // If the instruction implicitly reads VCC, we can't have any SGPR
    // operands, so move any to VGPRs.
    bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
    if (ReadsVCC && Src0.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
      legalizeOpWithMove(MI, Src0Idx);
      return;
    }

    if (ReadsVCC && Src1.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
      legalizeOpWithMove(MI, Src1Idx);
      return;
    }

    // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
    // be the first operand, and there can only be one.
    if (Src1.isImm() || Src1.isFPImm() ||
        (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
      if (MI->isCommutable()) {
        if (commuteInstruction(MI))
          return;
      }
      legalizeOpWithMove(MI, Src1Idx);
    }
  }

  // XXX - Do any VOP3 instructions read VCC?
  // Legalize VOP3
  if (isVOP3(MI->getOpcode())) {
    int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
    unsigned SGPRReg = AMDGPU::NoRegister;
    for (unsigned i = 0; i < 3; ++i) {
      int Idx = VOP3Idx[i];
      if (Idx == -1)
        continue;
      MachineOperand &MO = MI->getOperand(Idx);

      if (MO.isReg()) {
        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
          continue; // VGPRs are legal

        assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");

        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
          SGPRReg = MO.getReg();
          // We can use one SGPR in each VOP3 instruction.
          continue;
        }
      } else if (!isLiteralConstant(MO)) {
        // If it is not a register and not a literal constant, then it must be
        // an inline constant which is always legal.
        continue;
      }
      // If we make it this far, then the operand is not legal and we must
      // legalize it.
      legalizeOpWithMove(MI, Idx);
    }
  }

  // Legalize REG_SEQUENCE
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be;
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              get(AMDGPU::COPY), DstReg)
              .addOperand(MI->getOperand(i));
      MI->getOperand(i).setReg(DstReg);
    }
  }
}
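
// Replace a scalar ALU instruction with its vector ALU equivalent, then
// propagate: any user of the new VGPR result that cannot accept a VGPR must
// itself be moved to the VALU, so a worklist is processed to a fixpoint.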
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    unsigned NewOpcode = getVALUOp(*Inst);
    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
      continue;

    MachineRegisterInfo &MRI = Inst->getParent()->getParent()->getRegInfo();

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it,
    // and we're just about to add the implicit use / defs of VCC, and we
    // don't want both.
    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst->getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
        Inst->RemoveOperand(i);
    }

    // Add the implicit and explicit register definitions.
    if (NewDesc.ImplicitUses) {
      for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
        unsigned Reg = NewDesc.ImplicitUses[i];
        Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
      }
    }

    if (NewDesc.ImplicitDefs) {
      for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
        unsigned Reg = NewDesc.ImplicitDefs[i];
        Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
      }
    }

    legalizeOperands(Inst);

    // Update the destination register class.
    const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);

    switch (Inst->getOpcode()) {
    // For target instructions, getOpRegClass just returns the virtual
    // register class associated with the operand, so we need to find an
    // equivalent VGPR register class in order to move the instruction to the
    // VALU.
    case AMDGPU::COPY:
    case AMDGPU::PHI:
    case AMDGPU::REG_SEQUENCE:
      if (RI.hasVGPRs(NewDstRC))
        continue;
      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        continue;
      break;
    default:
      break;
    }

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);
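
    // Users of the old SGPR result now see a VGPR. Any user that cannot
    // read a VGPR in that operand has to be moved to the VALU as well.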
    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
         E = MRI.use_end(); I != E; ++I) {
      MachineInstr &UseMI = *I;
      if (!canReadVGPR(UseMI, I.getOperandNo())) {
        Worklist.push_back(&UseMI);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VReg_32RegClass;
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
          .addReg(IndirectBaseReg, RegState::Define)
          .addOperand(I->getOperand(0))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0)
          .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
          .addOperand(I->getOperand(0))
          .addOperand(I->getOperand(1))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0);
}

void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;
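
  // Reserve, in every width class, each register that overlaps the VGPR
  // index range [Begin, End] used for indirect addressing; a register that
  // is N dwords wide and starts at Begin - (N - 1) still reaches into it.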
  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}