//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm) { }

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;
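
  // Special case: a write to M0 may be redundant if M0 was just set from the
  // same source. Schematically, in a sequence such as
  //   S_MOV_B32 m0, s2
  //   <copy>    m0, s2   <- the copy being expanded here
  // the backwards scan below finds the S_MOV_B32/COPY that already put the
  // value in M0 and skips emitting a second one.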

  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }
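
  // The copy could not be emitted as a single instruction, so expand it
  // sub-register by sub-register. E.g. copying a 128-bit SGPR tuple
  // s[4:7] = COPY s[0:3] roughly becomes:
  //   s4 = S_MOV_B32 s0, implicit-def s[4:7]
  //   s5 = S_MOV_B32 s1, implicit-def s[4:7]
  //   s6 = S_MOV_B32 s2, implicit-def s[4:7]
  //   s7 = S_MOV_B32 s3
  // where the implicit defs keep the super-register live until the last piece
  // is written.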

  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}
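
// commuteOpcode consults two tablegen-generated tables: getCommuteRev maps an
// opcode to its operand-swapped "reverse" form and getCommuteOrig maps back
// again (an illustrative pair: V_SUB_F32 <-> V_SUBREV_F32). An opcode found in
// neither table is its own commuted form and is returned unchanged.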
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}
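
// SGPR spilling here does not touch memory: V_WRITELANE_B32 parks each 32-bit
// scalar value in one lane of the spill tracker's reserved LaneVGPR, and
// V_READLANE_B32 recovers it later. A wide SGPR tuple therefore reserves one
// lane per 32-bit sub-register.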

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned KillFlag = isKill ? RegState::Kill : 0;
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  if (TRI->getCommonSubClass(RC, &AMDGPU::SGPR_32RegClass)) {
    unsigned Lane = MFI->SpillTracker.reserveLanes(MRI, MBB.getParent());

    BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32), MFI->SpillTracker.LaneVGPR)
            .addReg(SrcReg, KillFlag)
            .addImm(Lane);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR, Lane);
  } else if (RI.isSGPRClass(RC)) {
    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instructions for vector
    // registers.
    //
    // Reserve a spot in the spill tracker for each sub-register of
    // the vector register.
    unsigned NumSubRegs = RC->getSize() / 4;
    unsigned FirstLane = MFI->SpillTracker.reserveLanes(MRI, MBB.getParent(),
                                                        NumSubRegs);
    MFI->SpillTracker.addSpilledReg(FrameIndex, MFI->SpillTracker.LaneVGPR,
                                    FirstLane);

    unsigned Opcode;
    switch (RC->getSize() * 8) {
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_SAVE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
    default: llvm_unreachable("Cannot spill register class");
    }

    BuildMI(MBB, MI, DL, get(Opcode), MFI->SpillTracker.LaneVGPR)
            .addReg(SrcReg)
            .addImm(FrameIndex);
  } else {
    llvm_unreachable("VGPR spilling not supported");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  SIMachineFunctionInfo *MFI = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  if (TRI->getCommonSubClass(RC, &AMDGPU::SReg_32RegClass)) {
    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);

    BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), DestReg)
            .addReg(Spill.VGPR)
            .addImm(Spill.Lane);
  } else if (RI.isSGPRClass(RC)) {
    unsigned Opcode;
    switch (RC->getSize() * 8) {
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_RESTORE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
    default: llvm_unreachable("Cannot spill register class");
    }

    SIMachineFunctionInfo::SpilledReg Spill =
        MFI->SpillTracker.getSpilledReg(FrameIndex);

    BuildMI(MBB, MI, DL, get(Opcode), DestReg)
            .addReg(Spill.VGPR)
            .addImm(FrameIndex);
  } else {
    llvm_unreachable("VGPR spilling not supported");
  }
}

static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
    return 2;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
                             int Count) const {
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
            .addImm(Arg);
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  SIMachineFunctionInfo *MFI =
      MI->getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);

  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
    unsigned FrameIndex = MI->getOperand(2).getImm();

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      SIMachineFunctionInfo::SpilledReg Spill;
      unsigned SubReg = RI.getPhysRegSubReg(MI->getOperand(1).getReg(),
                                            &AMDGPU::SGPR_32RegClass, i);
      Spill = MFI->SpillTracker.getSpilledReg(FrameIndex);

      BuildMI(MBB, MI, DL, get(AMDGPU::V_WRITELANE_B32),
              MI->getOperand(0).getReg())
              .addReg(SubReg)
              .addImm(Spill.Lane + i);
    }
    MI->eraseFromParent();
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      SIMachineFunctionInfo::SpilledReg Spill;
      unsigned FrameIndex = MI->getOperand(2).getImm();
      unsigned SubReg = RI.getPhysRegSubReg(MI->getOperand(0).getReg(),
                                            &AMDGPU::SGPR_32RegClass, i);
      Spill = MFI->SpillTracker.getSpilledReg(FrameIndex);

      BuildMI(MBB, MI, DL, get(AMDGPU::V_READLANE_B32), SubReg)
              .addReg(MI->getOperand(1).getReg())
              .addImm(Spill.Lane + i);
    }
    MI->eraseFromParent();
    break;
  }
  }
  return true;
}

MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
    return nullptr;

  // Cannot commute VOP2 if src0 is SGPR.
  if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
      RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
    return nullptr;

  if (!MI->getOperand(2).isReg()) {
    // XXX: Commute instructions with FPImm operands
    if (NewMI || MI->getOperand(2).isFPImm() ||
        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return nullptr;
    }

    // XXX: Commute VOP3 instructions with abs and neg set.
    if (isVOP3(MI->getOpcode()) &&
        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::abs)).getImm() ||
         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::neg)).getImm()))
      return nullptr;

    unsigned Reg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
    MI->getOperand(2).ChangeToRegister(Reg, false);
    MI->getOperand(2).setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
  }

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}

bool
SIInstrInfo::isTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::isTriviallyReMaterializable(MI, AA);
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
    return MI->getOperand(1).isImm();
  }
}

namespace llvm {
namespace AMDGPU {
// Helper function generated by tablegen. We are wrapping this with
// an SIInstrInfo function that returns bool rather than int.
int isDS(uint16_t Opcode);
} // namespace AMDGPU
} // namespace llvm

bool SIInstrInfo::isDS(uint16_t Opcode) const {
  return ::AMDGPU::isDS(Opcode) != -1;
}

int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int32_t Val = Imm.getSExtValue();
  if (Val >= -16 && Val <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  return (APInt::floatToBits(0.0f) == Imm) ||
         (APInt::floatToBits(1.0f) == Imm) ||
         (APInt::floatToBits(-1.0f) == Imm) ||
         (APInt::floatToBits(0.5f) == Imm) ||
         (APInt::floatToBits(-0.5f) == Imm) ||
         (APInt::floatToBits(2.0f) == Imm) ||
         (APInt::floatToBits(-2.0f) == Imm) ||
         (APInt::floatToBits(4.0f) == Imm) ||
         (APInt::floatToBits(-4.0f) == Imm);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm())
    return isInlineConstant(APInt(32, MO.getImm(), true));

  if (MO.isFPImm()) {
    APFloat FpImm = MO.getFPImm()->getValueAPF();
    return isInlineConstant(FpImm.bitcastToAPInt());
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct
  for (unsigned i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      break;
    case MCOI::OPERAND_IMMEDIATE:
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    int RegClass = Desc.OpInfo[i].RegClass;
    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
             (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
              AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm() || Src1.isFPImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

  // Verify VOP3
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}

unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR: return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR: return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e32;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1)
    return MRI.getRegClass(MI.getOperand(OpNo).getReg());

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
  case AMDGPU::INSERT_SUBREG:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}
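
// legalizeOpWithMove materializes one illegal operand into a fresh virtual
// register of the equivalent VGPR class: register operands are COPYed,
// immediates headed for a scalar operand class use S_MOV_B32, and all other
// immediates use V_MOV_B32_e32.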
void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg()) {
    Opcode = AMDGPU::COPY;
  } else if (RI.isSGPRClass(RC)) {
    Opcode = AMDGPU::S_MOV_B32;
  }

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  unsigned Reg = MRI.createVirtualRegister(VRC);
  BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
          Reg).addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  assert(SuperReg.isReg());

  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          NewSuperReg)
          .addOperand(SuperReg);

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(TargetOpcode::COPY),
          SubReg)
          .addReg(NewSuperReg, 0, SubIdx);
  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}
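
// Worked example for split64BitImm: materializing the 64-bit immediate
// 0x1122334455667788 becomes, schematically,
//   %lo  = S_MOV_B32 0x55667788
//   %hi  = S_MOV_B32 0x11223344
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1
// with both S_MOV_B32s pushed on the worklist so they too can be moved to the
// VALU if their results end up in VGPRs.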
unsigned SIInstrInfo::split64BitImm(SmallVectorImpl<MachineInstr *> &Worklist,
                                    MachineBasicBlock::iterator MI,
                                    MachineRegisterInfo &MRI,
                                    const TargetRegisterClass *RC,
                                    const MachineOperand &Op) const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned LoDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned HiDst = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned Dst = MRI.createVirtualRegister(RC);

  MachineInstr *Lo = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             LoDst)
    .addImm(Op.getImm() & 0xFFFFFFFF);
  MachineInstr *Hi = BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32),
                             HiDst)
    .addImm(Op.getImm() >> 32);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::REG_SEQUENCE), Dst)
    .addReg(LoDst)
    .addImm(AMDGPU::sub0)
    .addReg(HiDst)
    .addImm(AMDGPU::sub1);

  Worklist.push_back(Lo);
  Worklist.push_back(Hi);

  return Dst;
}

void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  // Legalize VOP2
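  // VOP2 encodings can take at most one SGPR or constant operand, and only in
  // src0. E.g. V_ADD_F32_e32 v0, v1, s2 is not encodable as written; since
  // V_ADD_F32 is commutable it becomes V_ADD_F32_e32 v0, s2, v1, while
  // non-commutable cases copy the offending src1 into a VGPR instead.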
  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
    MachineOperand &Src0 = MI->getOperand(Src0Idx);
    MachineOperand &Src1 = MI->getOperand(Src1Idx);

    // If the instruction implicitly reads VCC, we can't have any SGPR operands,
    // so move any.
    bool ReadsVCC = MI->readsRegister(AMDGPU::VCC, &RI);
    if (ReadsVCC && Src0.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src0.getReg()))) {
      legalizeOpWithMove(MI, Src0Idx);
      return;
    }

    if (ReadsVCC && Src1.isReg() &&
        RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
      legalizeOpWithMove(MI, Src1Idx);
      return;
    }

    // Legalize VOP2 instructions where src1 is not a VGPR. An SGPR input must
    // be the first operand, and there can only be one.
    if (Src1.isImm() || Src1.isFPImm() ||
        (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
      if (MI->isCommutable()) {
        if (commuteInstruction(MI))
          return;
      }
      legalizeOpWithMove(MI, Src1Idx);
    }
  }

  // XXX - Do any VOP3 instructions read VCC?
  // Legalize VOP3
  if (isVOP3(MI->getOpcode())) {
    int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
    unsigned SGPRReg = AMDGPU::NoRegister;
    for (unsigned i = 0; i < 3; ++i) {
      int Idx = VOP3Idx[i];
      if (Idx == -1)
        continue;
      MachineOperand &MO = MI->getOperand(Idx);

      if (MO.isReg()) {
        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
          continue; // VGPRs are legal

        assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");

        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
          SGPRReg = MO.getReg();
          // We can use one SGPR in each VOP3 instruction.
          continue;
        }
      } else if (!isLiteralConstant(MO)) {
        // If it is not a register and not a literal constant, then it must be
        // an inline constant which is always legal.
        continue;
      }

      // If we make it this far, then the operand is not legal and we must
      // legalize it.
      legalizeOpWithMove(MI, Idx);
    }
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE ||
      MI->getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be,
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);
      MachineBasicBlock *InsertBB;
      MachineBasicBlock::iterator Insert;
      if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
        InsertBB = MI->getParent();
        Insert = MI;
      } else {
        // MI is a PHI instruction.
        InsertBB = MI->getOperand(i + 1).getMBB();
        Insert = InsertBB->getFirstTerminator();
      }
      BuildMI(*InsertBB, Insert, MI->getDebugLoc(),
              get(AMDGPU::COPY), DstReg)
              .addOperand(MI->getOperand(i));
      MI->getOperand(i).setReg(DstReg);
    }
  }

  // Legalize INSERT_SUBREG
  // src0 must have the same register class as dst
  if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned Src0 = MI->getOperand(1).getReg();
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
    if (DstRC != Src0RC) {
      MachineBasicBlock &MBB = *MI->getParent();
      unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
              .addReg(Src0);
      MI->getOperand(1).setReg(NewSrc0);
    }
    return;
  }

  // Legalize MUBUF* instructions
  // FIXME: If we start using the non-addr64 instructions for compute, we
  // may need to legalize them here.

  int SRsrcIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::srsrc);
  int VAddrIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                            AMDGPU::OpName::vaddr);
  if (SRsrcIdx != -1 && VAddrIdx != -1) {
    const TargetRegisterClass *VAddrRC =
        RI.getRegClass(get(MI->getOpcode()).OpInfo[VAddrIdx].RegClass);

    if (VAddrRC->getSize() == 8 &&
        MRI.getRegClass(MI->getOperand(SRsrcIdx).getReg()) != VAddrRC) {
      // We have a MUBUF instruction that uses a 64-bit vaddr register and
      // srsrc has the incorrect register class. In order to fix this, we
      // need to extract the pointer from the resource descriptor (srsrc),
      // add it to the value of vaddr, then store the result in the vaddr
      // operand. Then, we need to set the pointer field of the resource
      // descriptor to zero.

      MachineBasicBlock &MBB = *MI->getParent();
      MachineOperand &SRsrcOp = MI->getOperand(SRsrcIdx);
      MachineOperand &VAddrOp = MI->getOperand(VAddrIdx);
      unsigned SRsrcPtrLo, SRsrcPtrHi, VAddrLo, VAddrHi;
      unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
      unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);

      // SRsrcPtrLo = srsrc:sub0
      SRsrcPtrLo = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // SRsrcPtrHi = srsrc:sub1
      SRsrcPtrHi = buildExtractSubReg(MI, MRI, SRsrcOp,
          &AMDGPU::VReg_128RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // VAddrLo = vaddr:sub0
      VAddrLo = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub0, &AMDGPU::VReg_32RegClass);

      // VAddrHi = vaddr:sub1
      VAddrHi = buildExtractSubReg(MI, MRI, VAddrOp,
          &AMDGPU::VReg_64RegClass, AMDGPU::sub1, &AMDGPU::VReg_32RegClass);

      // NewVaddrLo = SRsrcPtrLo + VAddrLo
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADD_I32_e32),
              NewVAddrLo)
              .addReg(SRsrcPtrLo)
              .addReg(VAddrLo)
              .addReg(AMDGPU::VCC, RegState::Define | RegState::Implicit);

      // NewVaddrHi = SRsrcPtrHi + VAddrHi
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_ADDC_U32_e32),
              NewVAddrHi)
              .addReg(SRsrcPtrHi)
              .addReg(VAddrHi)
              .addReg(AMDGPU::VCC, RegState::ImplicitDefine)
              .addReg(AMDGPU::VCC, RegState::Implicit);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
              .addReg(NewVAddrLo)
              .addImm(AMDGPU::sub0)
              .addReg(NewVAddrHi)
              .addImm(AMDGPU::sub1);

      // Zero64 = 0
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
              Zero64)
              .addImm(0);

      // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatLo)
              .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);

      // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
              SRsrcFormatHi)
              .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);

      // NewSRsrc = {Zero64, SRsrcFormat}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewSRsrc)
              .addReg(Zero64)
              .addImm(AMDGPU::sub0_sub1)
              .addReg(SRsrcFormatLo)
              .addImm(AMDGPU::sub2)
              .addReg(SRsrcFormatHi)
              .addImm(AMDGPU::sub3);

      // Update the instruction to use NewVaddr
      MI->getOperand(VAddrIdx).setReg(NewVAddr);
      // Update the instruction to use NewSRsrc
      MI->getOperand(SRsrcIdx).setReg(NewSRsrc);
    }
  }
}
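
// moveSMRDToVALU rewrites a scalar memory load into its vector MUBUF
// equivalent (e.g. S_LOAD_DWORD_IMM -> BUFFER_LOAD_DWORD_ADDR64, per
// getVALUOp). The buffer resource descriptor is synthesized inline as four
// dwords {offset-or-zero, 0, RSRC_DATA_FORMAT[31:0], RSRC_DATA_FORMAT[63:32]},
// and the SMRD offset, counted in dwords, is scaled to the byte offset MUBUF
// expects.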
void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI, MachineRegisterInfo &MRI) const {
  MachineBasicBlock *MBB = MI->getParent();
  switch (MI->getOpcode()) {
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR:
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR:
    unsigned NewOpcode = getVALUOp(*MI);
    unsigned RegOffset;
    unsigned ImmOffset;

    if (MI->getOperand(2).isReg()) {
      RegOffset = MI->getOperand(2).getReg();
      ImmOffset = 0;
    } else {
      assert(MI->getOperand(2).isImm());
      // SMRD instructions take a dword offset and MUBUF instructions
      // take a byte offset.
      ImmOffset = MI->getOperand(2).getImm() << 2;
      RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      if (isUInt<12>(ImmOffset)) {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
                .addImm(0);
      } else {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
                .addImm(ImmOffset);
        ImmOffset = 0;
      }
    }

    unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
    unsigned DWord0 = RegOffset;
    unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
            .addImm(0);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
            .addImm(AMDGPU::RSRC_DATA_FORMAT & 0xFFFFFFFF);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
            .addImm(AMDGPU::RSRC_DATA_FORMAT >> 32);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
            .addReg(DWord0)
            .addImm(AMDGPU::sub0)
            .addReg(DWord1)
            .addImm(AMDGPU::sub1)
            .addReg(DWord2)
            .addImm(AMDGPU::sub2)
            .addReg(DWord3)
            .addImm(AMDGPU::sub3);
    MI->setDesc(get(NewOpcode));
    if (MI->getOperand(2).isReg()) {
      MI->getOperand(2).setReg(MI->getOperand(1).getReg());
    } else {
      MI->getOperand(2).ChangeToRegister(MI->getOperand(1).getReg(), false);
    }
    MI->getOperand(1).setReg(SRsrc);
    MI->addOperand(*MBB->getParent(), MachineOperand::CreateImm(ImmOffset));
  }
}
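
// moveToVALU is a worklist algorithm: converting one instruction may turn an
// SGPR definition into a VGPR one, so every user that cannot accept a VGPR in
// that operand position is pushed onto the worklist and converted in turn.
// E.g. rewriting S_AND_B32 as V_AND_B32_e32 forces any scalar-only users of
// its result to be revisited.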
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst->getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst->getOpcode();
    unsigned NewOpcode = getVALUOp(*Inst);

    // Handle some special cases
    switch (Opcode) {
    default:
      if (isSMRD(Inst->getOpcode())) {
        moveSMRDToVALU(Inst, MRI);
      }
      break;
    case AMDGPU::S_MOV_B64: {
      DebugLoc DL = Inst->getDebugLoc();

      // If the source operand is a register we can replace this with a
      // copy.
      if (Inst->getOperand(1).isReg()) {
        MachineInstr *Copy = BuildMI(*MBB, Inst, DL, get(TargetOpcode::COPY))
          .addOperand(Inst->getOperand(0))
          .addOperand(Inst->getOperand(1));
        Worklist.push_back(Copy);
      } else {
        // Otherwise, we need to split this into two movs, because there is
        // no 64-bit VALU move instruction.
        unsigned Reg = Inst->getOperand(0).getReg();
        unsigned Dst = split64BitImm(Worklist,
                                     Inst,
                                     MRI,
                                     MRI.getRegClass(Reg),
                                     Inst->getOperand(1));
        MRI.replaceRegWith(Reg, Dst);
      }
      Inst->eraseFromParent();
      continue;
    }
    case AMDGPU::S_AND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BCNT1_I32_B64:
      splitScalar64BitBCNT(Worklist, Inst);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFE_I64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");
    }

    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
      legalizeOperands(Inst);
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it, and
    // we're just about to add the implicit use / defs of VCC, and we don't want
    // both.
    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst->getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
        Inst->RemoveOperand(i);
    }

    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      // We are converting these to a BFE, so we need to add the missing
      // operands for the size and offset.
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
      Inst->addOperand(Inst->getOperand(1));
      Inst->getOperand(1).ChangeToImmediate(0);
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(Size));

      // XXX - Other pointless operands. There are 4, but it seems you only need
      // 3 to not hit an assertion later in MCInstLower.
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
    } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
      // The VALU version adds the second operand to the result, so insert an
      // extra 0 operand.
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    addDescImplicitUseDef(NewDesc, Inst);

    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
      const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
      // If we need to move this to VGPRs, we need to unpack the second operand
      // back into the 2 separate ones for bit offset and width.
      assert(OffsetWidthOp.isImm() &&
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t Imm = OffsetWidthOp.getImm();

      uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
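
      // E.g. extracting 8 bits starting at bit 16 arrives here packed as
      // Imm = 0x00080010, which unpacks to Offset = 16 and BitWidth = 8.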

      Inst->RemoveOperand(2); // Remove old immediate.
      Inst->addOperand(Inst->getOperand(1));
      Inst->getOperand(1).ChangeToImmediate(0);
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(Offset));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(BitWidth));
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    // Update the destination register class.

    const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);

    switch (Opcode) {
      // For target instructions, getOpRegClass just returns the virtual
      // register class associated with the operand, so we need to find an
      // equivalent VGPR register class in order to move the instruction to the
      // VALU.
    case AMDGPU::COPY:
    case AMDGPU::PHI:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::INSERT_SUBREG:
      if (RI.hasVGPRs(NewDstRC))
        continue;
      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        continue;
      break;
    default:
      break;
    }

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    // Legalize the operands
    legalizeOperands(Inst);

    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
         E = MRI.use_end(); I != E; ++I) {
      MachineInstr &UseMI = *I->getParent();
      if (!canReadVGPR(UseMI, I.getOperandNo())) {
        Worklist.push_back(&UseMI);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VReg_32RegClass;
}

void SIInstrInfo::splitScalar64BitUnaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
    .addOperand(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
    .addOperand(SrcReg0Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  Worklist.push_back(LoHalf);
  Worklist.push_back(HiHalf);
}
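
// splitScalar64BitBinaryOp handles 64-bit scalar ops with no one-instruction
// VALU equivalent by operating on the 32-bit halves, e.g. for S_AND_B64
// (schematically):
//   %lo  = S_AND_B32 %src0.sub0, %src1.sub0
//   %hi  = S_AND_B32 %src0.sub1, %src1.sub1
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1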
void SIInstrInfo::splitScalar64BitBinaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  MachineOperand &Src1 = Inst->getOperand(2);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
    .addOperand(SrcReg0Sub0)
    .addOperand(SrcReg1Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
    .addOperand(SrcReg0Sub1)
    .addOperand(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  Worklist.push_back(LoHalf);
  Worklist.push_back(HiHalf);
}
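
// splitScalar64BitBCNT relies on popcount(x) = popcount(x.lo) + popcount(x.hi).
// Since V_BCNT_U32_B32 computes bcnt(src0) + src1, the first instruction adds
// 0 and the second chains the intermediate count through its second operand.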
void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
                                       MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src = Inst->getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e32);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  MachineInstr *First = BuildMI(MBB, MII, DL, InstDesc, MidReg)
    .addOperand(SrcRegSub0)
    .addImm(0);

  MachineInstr *Second = BuildMI(MBB, MII, DL, InstDesc, ResultReg)
    .addOperand(SrcRegSub1)
    .addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  Worklist.push_back(First);
  Worklist.push_back(Second);
}

void SIInstrInfo::addDescImplicitUseDef(const MCInstrDesc &NewDesc,
                                        MachineInstr *Inst) const {
  // Add the implicit and explicit register definitions.
  if (NewDesc.ImplicitUses) {
    for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitUses[i];
      Inst->addOperand(MachineOperand::CreateReg(Reg, false, true));
    }
  }

  if (NewDesc.ImplicitDefs) {
    for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
      unsigned Reg = NewDesc.ImplicitDefs[i];
      Inst->addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned ValueReg,
                                 unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
          .addReg(IndirectBaseReg, RegState::Define)
          .addOperand(I->getOperand(0))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0)
          .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                 MachineBasicBlock *MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned ValueReg,
                                 unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
          .addOperand(I->getOperand(0))
          .addOperand(I->getOperand(1))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0);
}
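
// Tuples that merely overlap the indirectly-addressed window [Begin, End] must
// be reserved as well: a VReg_64 starting one register below Begin still
// covers index Begin, a VReg_128 starting three below does, and so on. That is
// why each wider class below starts at std::max(0, Begin - (NumRegs - 1)).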
void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}