//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Cell SPU implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SPURegisterNames.h"
#include "SPUInstrInfo.h"
#include "SPUInstrBuilder.h"
#include "SPUTargetMachine.h"
#include "SPUGenInstrInfo.inc"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/Streams.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

//! Predicate for an unconditional branch instruction
inline bool isUncondBranch(const MachineInstr *I) {
  unsigned opc = I->getOpcode();

  return (opc == SPU::BR);
}

//! Predicate for a conditional branch instruction
inline bool isCondBranch(const MachineInstr *I) {
  unsigned opc = I->getOpcode();

  return (opc == SPU::BRNZr32
          || opc == SPU::BRNZv4i32
          || opc == SPU::BRZr32
          || opc == SPU::BRZv4i32
          || opc == SPU::BRHNZr16
          || opc == SPU::BRHNZv8i16
          || opc == SPU::BRHZr16
          || opc == SPU::BRHZv8i16);
}
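
//! Construct the SPU TargetInstrInfo, handing the generated instruction table
//! to the base class and initializing the SPU register info.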
SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
  : TargetInstrInfoImpl(SPUInsts, sizeof(SPUInsts)/sizeof(SPUInsts[0])),
    TM(tm),
    RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }
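
//! isMoveInstr - Return true if MI is a register-to-register move, and if so
//! report the source and destination registers through sourceReg and destReg.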
bool
SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
                          unsigned& sourceReg,
                          unsigned& destReg,
                          unsigned& SrcSR, unsigned& DstSR) const {
  SrcSR = DstSR = 0;  // No sub-registers.

  // Primarily, ORI and OR are generated by copyRegToReg. But, there are other
  // cases where we can safely say that what's being done is really a move
  // (see how PowerPC does this -- it's the model for this code too.)
  switch (MI.getOpcode()) {
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isImm() &&
           "invalid SPU ORI/ORHI/ORBI/AHI/AI/SFI/SFHI instruction!");
    if (MI.getOperand(2).getImm() == 0) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  case SPU::AIr32:
    assert(MI.getNumOperands() == 3 &&
           "wrong number of operands to AIr32");
    if (MI.getOperand(0).isReg() &&
        MI.getOperand(1).isReg() &&
        (MI.getOperand(2).isImm() &&
         MI.getOperand(2).getImm() == 0)) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  case SPU::ORv16i8_i8:
  case SPU::ORv8i16_i16:
  case SPU::ORv4i32_i32:
  case SPU::ORv2i64_i64:
  case SPU::ORv4f32_f32:
  case SPU::ORv2f64_f64:
  case SPU::ORi8_v16i8:
  case SPU::ORi16_v8i16:
  case SPU::ORi32_v4i32:
  case SPU::ORi64_v2i64:
  case SPU::ORf32_v4f32:
  case SPU::ORf64_v2f64:
  case SPU::ORi128_r64:
  case SPU::ORi128_f64:
  case SPU::ORi128_r32:
  case SPU::ORi128_f32:
  case SPU::ORi128_r16:
  case SPU::ORi128_vec:
  case SPU::ORr64_i128:
  case SPU::ORf64_i128:
  case SPU::ORr32_i128:
  case SPU::ORf32_i128:
  case SPU::ORr16_i128:
  case SPU::ORvec_i128:
  case SPU::ORr64_f64: {
    assert(MI.getNumOperands() == 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "invalid SPU OR<type>_<vec> or LR instruction!");
    if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
      sourceReg = MI.getOperand(0).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isReg() &&
           "invalid SPU OR(vec|r32|r64|gprc) instruction!");
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }

  return false;
}
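
//! isLoadFromStackSlot - If MI is a direct load from a stack slot, set
//! FrameIndex to the slot's index and return the destination register;
//! otherwise return 0.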
unsigned
SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                  int &FrameIndex) const {
  switch (MI->getOpcode()) {
  // D-form (reg + immediate) quadword loads, i.e. the opcodes that
  // loadRegFromStackSlot emits for in-range frame indices.
  case SPU::LQDv16i8:
  case SPU::LQDr128:
  case SPU::LQDr64:
  case SPU::LQDr32:
  case SPU::LQDr16:
  case SPU::LQDr8: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
    if (MOp1.isImm() && MOp2.isFI()) {
      FrameIndex = MOp2.getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  }

  return 0;
}
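
//! isStoreToStackSlot - If MI is a direct store to a stack slot, set
//! FrameIndex to the slot's index and return the register being stored;
//! otherwise return 0.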
unsigned
SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                 int &FrameIndex) const {
  switch (MI->getOpcode()) {
  // D-form (reg + immediate) quadword stores, i.e. the opcodes that
  // storeRegToStackSlot emits for in-range frame indices.
  case SPU::STQDv16i8:
  case SPU::STQDr128:
  case SPU::STQDr64:
  case SPU::STQDr32:
  case SPU::STQDr16:
  case SPU::STQDr8: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
    if (MOp1.isImm() && MOp2.isFI()) {
      FrameIndex = MOp2.getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  }

  return 0;
}
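
//! copyRegToReg - Copy SrcReg to DestReg by emitting the LR move instruction
//! appropriate for the destination register class; return false if the class
//! is not handled.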
bool SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const
{
  // We support cross register class moves for our aliases, such as R3 in any
  // reg class to any other reg class containing R3. This is required because
  // we instruction select bitconvert i64 -> f64 as a noop for example, so our
  // types have no specific meaning.

  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();

  if (DestRC == SPU::R8CRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRr8), DestReg).addReg(SrcReg);
  } else if (DestRC == SPU::R16CRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRr16), DestReg).addReg(SrcReg);
  } else if (DestRC == SPU::R32CRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRr32), DestReg).addReg(SrcReg);
  } else if (DestRC == SPU::R32FPRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRf32), DestReg).addReg(SrcReg);
  } else if (DestRC == SPU::R64CRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRr64), DestReg).addReg(SrcReg);
  } else if (DestRC == SPU::R64FPRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRf64), DestReg).addReg(SrcReg);
  } else if (DestRC == SPU::GPRCRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRr128), DestReg).addReg(SrcReg);
  } else if (DestRC == SPU::VECREGRegisterClass) {
    BuildMI(MBB, MI, DL, get(SPU::LRv16i8), DestReg).addReg(SrcReg);
  } else {
    // Attempt to copy unknown/unsupported register class!
    return false;
  }

  return true;
}
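
//! storeRegToStackSlot - Spill SrcReg to the stack slot FrameIdx, choosing the
//! D-form (STQD) store when FrameIdx is below SPUFrameInfo::maxFrameOffset()
//! and the X-form (STQX) store otherwise.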
void
SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned SrcReg, bool isKill, int FrameIdx,
                                  const TargetRegisterClass *RC) const
{
  unsigned opc;
  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDv16i8 : SPU::STQXv16i8);
  } else {
    assert(0 && "Unknown regclass!");
  }

  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();
  addFrameReference(BuildMI(MBB, MI, DL, get(opc))
                      .addReg(SrcReg, false, false, isKill), FrameIdx);
}
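
//! storeRegToAddr - Store SrcReg to an address described by an arbitrary
//! operand list. This path appears to be unfinished: the register-class cases
//! below are still placeholders copied from the PPC backend.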
void SPUInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  cerr << "storeRegToAddr() invoked!\n";

  if (Addr[0].isFI()) {
    /* do what storeRegToStackSlot does here */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::GPRCRegisterClass) {
      /* Opc = PPC::STW; */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::STD; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::STFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::STVX; */
    } else {
      assert(0 && "Unknown regclass!");
    }

    DebugLoc DL = DebugLoc::getUnknownLoc();
    MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc))
          .addReg(SrcReg, false, false, isKill);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}
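
//! loadRegFromStackSlot - Reload DestReg from the stack slot FrameIdx, using
//! the D-form (LQD) load when FrameIdx is below SPUFrameInfo::maxFrameOffset()
//! and the X-form (LQX) load otherwise.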
void
SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, int FrameIdx,
                                   const TargetRegisterClass *RC) const
{
  unsigned opc;
  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDv16i8 : SPU::LQXv16i8);
  } else {
    assert(0 && "Unknown regclass in loadRegFromStackSlot!");
  }

  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();
  addFrameReference(BuildMI(MBB, MI, DL, get(opc)).addReg(DestReg), FrameIdx);
}
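
//! loadRegFromAddr - Load DestReg from an address described by an arbitrary
//! operand list; like storeRegToAddr, the register-class cases below are
//! still placeholders.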
//! \note We are really pessimistic here about what kind of a load we're doing.
void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs)
  const {
  cerr << "loadRegFromAddr() invoked!\n";

  if (Addr[0].isFI()) {
    /* do what loadRegFromStackSlot does here... */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::R8CRegisterClass) {
      /* do brilliance here */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::LWZ; */
    } else if (RC == SPU::R32CRegisterClass) {
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::LFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::LFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::LVX; */
    } else if (RC == SPU::GPRCRegisterClass) {
      /* Opc = something else! */
    } else {
      assert(0 && "Unknown regclass!");
    }

    DebugLoc DL = DebugLoc::getUnknownLoc();
    MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}

//! Return true if a stack-slot load or store can be folded into the copy MI.
bool
SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                   const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  // Make sure this is a reg-reg copy.
  unsigned Opc = MI->getOpcode();

  if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
    return true;

  return false;
}

/// foldMemoryOperand - SPU, like PPC, can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *
SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    int FrameIndex) const
{
  if (Ops.size() != 1) return 0;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = 0;

  if (OpNum == 0) { // move -> store
    unsigned InReg = MI->getOperand(1).getReg();
    bool isKill = MI->getOperand(1).isKill();
    if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
      // In-range frame index: use the D-form quadword store (cf.
      // storeRegToStackSlot).
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(),
                                        get(SPU::STQDr32));
      MIB.addReg(InReg, false, false, isKill);
      NewMI = addFrameReference(MIB, FrameIndex);
    }
  } else { // move -> load
    unsigned OutReg = MI->getOperand(0).getReg();
    bool isDead = MI->getOperand(0).isDead();
    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc));

    MIB.addReg(OutReg, true, false, false, isDead);
    Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
      ? SPU::STQDr32 : SPU::STQXr32;
    NewMI = addFrameReference(MIB, FrameIndex);
  }

  return NewMI;
}
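
//! AnalyzeBranch - Inspect the terminators of MBB. On success, return false
//! and fill in TBB, FBB and Cond (the branch opcode and condition register);
//! return true if the terminators cannot be understood.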
//! \note This code was adapted from PPC; there may be more branch analysis
//! for CellSPU than what's currently done here.
bool
SPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                            MachineBasicBlock *&FBB,
                            SmallVectorImpl<MachineOperand> &Cond,
                            bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranch(LastInst)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(LastInst)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(1).getMBB();
      DEBUG(cerr << "Pushing LastInst: ");
      DEBUG(LastInst->dump());
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    }
    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a conditional and unconditional branch, handle it.
  if (isCondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(1).getMBB();
    DEBUG(cerr << "Pushing SecondLastInst: ");
    DEBUG(SecondLastInst->dump());
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
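
//! RemoveBranch - Erase up to two branch terminators from the end of MBB and
//! return the number of instructions removed.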
unsigned
SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  if (!isCondBranch(I) && !isUncondBranch(I))
    return 0;

  // Remove the first branch.
  DEBUG(cerr << "Removing branch: ");
  DEBUG(I->dump());
  I->eraseFromParent();
  I = MBB.end();
  if (I == MBB.begin())
    return 1;
  --I;
  if (!(isCondBranch(I) || isUncondBranch(I)))
    return 1;

  // Remove the second branch.
  DEBUG(cerr << "Removing second branch: ");
  DEBUG(I->dump());
  I->eraseFromParent();
  return 2;
}
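
//! InsertBranch - Append a branch sequence to MBB: an unconditional or
//! conditional branch to TBB for the one-way case, or a conditional branch to
//! TBB followed by an unconditional branch to FBB for the two-way case.
//! Returns the number of instructions inserted.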
unsigned
SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME: this should probably have a DebugLoc argument
  DebugLoc dl = DebugLoc::getUnknownLoc();
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SPU branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) {
      // Unconditional branch
      MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(SPU::BR));
      MIB.addMBB(TBB);

      DEBUG(cerr << "Inserted one-way uncond branch: ");
      DEBUG((*MIB).dump());
    } else {
      // Conditional branch
      MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
      MIB.addReg(Cond[1].getReg()).addMBB(TBB);

      DEBUG(cerr << "Inserted one-way cond branch: ");
      DEBUG((*MIB).dump());
    }
    return 1;
  } else {
    MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
    MachineInstrBuilder MIB2 = BuildMI(&MBB, dl, get(SPU::BR));

    // Two-way Conditional Branch.
    MIB.addReg(Cond[1].getReg()).addMBB(TBB);
    MIB2.addMBB(FBB);

    DEBUG(cerr << "Inserted conditional branch: ");
    DEBUG((*MIB).dump());
    DEBUG(cerr << "part 2: ");
    DEBUG((*MIB2).dump());
    return 2;
  }
}
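
//! BlockHasNoFallThrough - Return true if MBB ends in an unconditional branch
//! and therefore never falls through to its layout successor.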
bool
SPUInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
  return (!MBB.empty() && isUncondBranch(&MBB.back()));
}

//! Reverses a branch's condition, returning false on success.
bool
SPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // Pretty brainless way of inverting the condition, but it works, considering
  // there are only a few condition code forms...
  static struct {
    unsigned Opc;               //! The incoming opcode
    unsigned RevCondOpc;        //! The reversed condition opcode
  } revconds[] = {
    { SPU::BRNZr32,    SPU::BRZr32 },
    { SPU::BRNZv4i32,  SPU::BRZv4i32 },
    { SPU::BRZr32,     SPU::BRNZr32 },
    { SPU::BRZv4i32,   SPU::BRNZv4i32 },
    { SPU::BRHNZr16,   SPU::BRHZr16 },
    { SPU::BRHNZv8i16, SPU::BRHZv8i16 },
    { SPU::BRHZr16,    SPU::BRHNZr16 },
    { SPU::BRHZv8i16,  SPU::BRHNZv8i16 }
  };

  unsigned Opc = unsigned(Cond[0].getImm());
  // Simple linear search through the table of condition/inverse opcode pairs:
  for (int i = sizeof(revconds)/sizeof(revconds[0]) - 1; i >= 0; --i) {
    if (revconds[i].Opc == Opc) {
      Cond[0].setImm(revconds[i].RevCondOpc);
      return false;
    }
  }

  return true;
}