//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Cell SPU implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SPURegisterNames.h"
#include "SPUInstrInfo.h"
#include "SPUInstrBuilder.h"
#include "SPUTargetMachine.h"
#include "SPUGenInstrInfo.inc"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/Streams.h"

using namespace llvm;
namespace {
  //! Predicate for an unconditional branch instruction
  inline bool isUncondBranch(const MachineInstr *I) {
    unsigned opc = I->getOpcode();

    return (opc == SPU::BR
            || opc == SPU::BRA
            || opc == SPU::BI);
  }

  //! Predicate for a conditional branch instruction
  inline bool isCondBranch(const MachineInstr *I) {
    unsigned opc = I->getOpcode();

    return (opc == SPU::BRNZr32
            || opc == SPU::BRNZv4i32
            || opc == SPU::BRZr32
            || opc == SPU::BRZv4i32
            || opc == SPU::BRHNZr16
            || opc == SPU::BRHNZv8i16
            || opc == SPU::BRHZr16
            || opc == SPU::BRHZv8i16);
  }
}
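
// Note: these predicates drive the branch analysis hooks (AnalyzeBranch,
// RemoveBranch and InsertBranch) at the end of this file; any new branch
// opcode must be added here for those hooks to recognize it.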

SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
  : TargetInstrInfoImpl(SPUInsts, sizeof(SPUInsts)/sizeof(SPUInsts[0])),
    TM(tm),
    RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }

/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
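/// On SPU, code and data addresses refer to the 256K local store, so a
/// pointer fits in 32 bits and lives in the R32C register class.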
const TargetRegisterClass *
SPUInstrInfo::getPointerRegClass() const
{
  return &SPU::R32CRegClass;
}

bool
SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
                          unsigned& sourceReg,
                          unsigned& destReg) const {
  // Primarily, ORI and OR are generated by copyRegToReg. But, there are other
  // cases where we can safely say that what's being done is really a move
  // (see how PowerPC does this -- it's the model for this code too.)
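  // For example, "ori rT, rA, 0" merely copies rA into rT, so it can safely
  // be reported as a register move.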
  switch (MI.getOpcode()) {
  default: break;
  // or/add-immediate instructions with a zero immediate are really moves:
  case SPU::ORIv4i32:
  case SPU::ORIr32:
  case SPU::ORHIv8i16:
  case SPU::ORHIr16:
  case SPU::ORBIv16i8:
  case SPU::ORBIr8:
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isImm() &&
           "invalid SPU ORI/ORHI/ORBI/AHI/AI/SFI/SFHI instruction!");
    if (MI.getOperand(2).getImm() == 0) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  case SPU::AIr32:
    assert(MI.getNumOperands() == 3 &&
           "wrong number of operands to AIr32");
    if (MI.getOperand(0).isReg() &&
        (MI.getOperand(1).isReg() ||
         MI.getOperand(1).isFI()) &&
        (MI.getOperand(2).isImm() &&
         MI.getOperand(2).getImm() == 0)) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  case SPU::ORv16i8_i8:
  case SPU::ORv8i16_i16:
  case SPU::ORv4i32_i32:
  case SPU::ORv2i64_i64:
  case SPU::ORv4f32_f32:
  case SPU::ORv2f64_f64:
  case SPU::ORi8_v16i8:
  case SPU::ORi16_v8i16:
  case SPU::ORi32_v4i32:
  case SPU::ORi64_v2i64:
  case SPU::ORf32_v4f32:
  case SPU::ORf64_v2f64: {
    assert(MI.getNumOperands() == 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "invalid SPU OR<type>_<vec> instruction!");
    if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
      sourceReg = MI.getOperand(0).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }
  case SPU::ORv16i8:
  case SPU::ORv8i16:
  case SPU::ORv4i32:
  case SPU::ORr32:
  case SPU::ORr64:
  case SPU::ORf32:
  case SPU::ORf64:
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isReg() &&
           "invalid SPU OR(vec|r32|r64|gprc) instruction!");
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }

  return false;
}

unsigned
SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                  int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case SPU::LQDv16i8:
  case SPU::LQDr128:
  case SPU::LQDr64:
  case SPU::LQDr32:
  case SPU::LQDr16: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
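    // A d-form load is a stack-slot load when its base is either a frame
    // index or the stack pointer ($1), with the byte offset converted to a
    // slot index in the latter case.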
    if (MOp1.isImm()
        && (MOp2.isFI()
            || (MOp2.isReg() && MOp2.getReg() == SPU::R1))) {
      if (MOp2.isFI())
        FrameIndex = MOp2.getIndex();
      else
        FrameIndex = MOp1.getImm() / SPUFrameInfo::stackSlotSize();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  case SPU::LQXv16i8:
  case SPU::LQXr128:
  case SPU::LQXr64:
  case SPU::LQXr32:
  case SPU::LQXr16:
    if (MI->getOperand(1).isReg() && MI->getOperand(2).isReg()
        && (MI->getOperand(2).getReg() == SPU::R1
            || MI->getOperand(1).getReg() == SPU::R1)) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned
SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                 int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case SPU::STQDv16i8:
  case SPU::STQDr128:
  case SPU::STQDr64:
  case SPU::STQDr32:
  case SPU::STQDr16:
  case SPU::STQDr8: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
    if (MOp1.isImm() && MOp2.isFI()) {
      FrameIndex = MOp2.getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  case SPU::STQXv16i8:
  case SPU::STQXr128:
  case SPU::STQXr64:
  case SPU::STQXr32:
  case SPU::STQXr16:
  case SPU::STQXr8:
    if (MI->getOperand(1).isReg() && MI->getOperand(2).isReg()
        && (MI->getOperand(2).getReg() == SPU::R1
            || MI->getOperand(1).getReg() == SPU::R1)) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

bool SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const
{
  // We support cross register class moves for our aliases, such as R3 in any
  // reg class to any other reg class containing R3. This is required because
  // we instruction select bitconvert i64 -> f64 as a noop for example, so our
  // types have no specific meaning.
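  // Consequently, the copy is keyed off the destination class alone: scalar
  // classes with an or-immediate form use "or-immediate dst, src, 0"; the
  // remaining classes use a plain "or dst, src, src".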

  if (DestRC == SPU::R8CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORBIr8), DestReg).addReg(SrcReg).addImm(0);
  } else if (DestRC == SPU::R16CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORHIr16), DestReg).addReg(SrcReg).addImm(0);
  } else if (DestRC == SPU::R32CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORIr32), DestReg).addReg(SrcReg).addImm(0);
  } else if (DestRC == SPU::R32FPRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORf32), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } else if (DestRC == SPU::R64CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORr64), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } else if (DestRC == SPU::R64FPRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORf64), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } /* else if (DestRC == SPU::GPRCRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORgprc), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } */ else if (DestRC == SPU::VECREGRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORv4i32), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } else {
    // Attempt to copy unknown/unsupported register class!
    return false;
  }

  return true;
}
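
// Usage sketch (hypothetical registers, assuming both live in R32C):
//   copyRegToReg(MBB, InsertPt, DstReg, SrcReg,
//                SPU::R32CRegisterClass, SPU::R32CRegisterClass);
// emits "ORIr32 DstReg, SrcReg, 0" immediately before InsertPt.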

void
SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned SrcReg, bool isKill, int FrameIdx,
                                  const TargetRegisterClass *RC) const
{
  unsigned opc;
  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
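  // Offsets that fit a d-form (register + signed immediate) operand can use
  // an STQD variant; anything larger falls back to the x-form (register +
  // register) STQX variant.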
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDv16i8 : SPU::STQXv16i8);
  } else {
    assert(0 && "Unknown regclass!");
    abort();
  }

  addFrameReference(BuildMI(MBB, MI, get(opc))
                    .addReg(SrcReg, false, false, isKill), FrameIdx);
}

void SPUInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  cerr << "storeRegToAddr() invoked!\n";
  abort();

  if (Addr[0].isFI()) {
    /* do what storeRegToStackSlot does here */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::GPRCRegisterClass) {
      /* Opc = PPC::STW; */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::STD; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::STFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::STVX; */
    } else {
      assert(0 && "Unknown regclass!");
      abort();
    }
    MachineInstrBuilder MIB = BuildMI(MF, get(Opc))
      .addReg(SrcReg, false, false, isKill);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}

void
SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, int FrameIdx,
                                   const TargetRegisterClass *RC) const
{
  unsigned opc;
  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
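  // Same d-form vs. x-form selection as storeRegToStackSlot above, using the
  // LQD/LQX load variants.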
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDv16i8 : SPU::LQXv16i8);
  } else {
    assert(0 && "Unknown regclass in loadRegFromStackSlot!");
    abort();
  }

  addFrameReference(BuildMI(MBB, MI, get(opc)).addReg(DestReg), FrameIdx);
}

/*!
  \note We are really pessimistic here about what kind of a load we're doing.
 */
void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs)
                                   const {
  cerr << "loadRegFromAddr() invoked!\n";
  abort();

  if (Addr[0].isFI()) {
    /* do what loadRegFromStackSlot does here... */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::R8CRegisterClass) {
      /* do brilliance here */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::LWZ; */
    } else if (RC == SPU::R32CRegisterClass) {
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::LFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::LFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::LVX; */
    } else if (RC == SPU::GPRCRegisterClass) {
      /* Opc = something else! */
    } else {
      assert(0 && "Unknown regclass!");
      abort();
    }
    MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}

/// foldMemoryOperand - SPU, like PPC, can only fold spills into
/// copy instructions, turning them into load/store instructions.
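/// Note that the body below is compiled out (the guard macro is never
/// defined), so this hook currently folds nothing and always returns 0.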
MachineInstr *
SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    int FrameIndex) const
{
#if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = 0;

  if ((Opc == SPU::ORr32
       || Opc == SPU::ORv4i32)
      && MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
    if (OpNum == 0) {  // move -> store
      unsigned InReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
        NewMI = addFrameReference(BuildMI(MF, get(SPU::STQDr32))
                                  .addReg(InReg, false, false, isKill),
                                  FrameIndex);
      }
    } else {           // move -> load
      unsigned OutReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      // Select a d-form or x-form load depending on the frame offset.
      Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
        ? SPU::LQDr32 : SPU::LQXr32;
      NewMI = addFrameReference(BuildMI(MF, get(Opc))
                                .addReg(OutReg, true, false, false, isDead),
                                FrameIndex);
    }
  }

  return NewMI;
#else
  return 0;
#endif
}

/*!
  \note This code was kiped from PPC. There may be more branch analysis for
  CellSPU than what's currently done here.
 */
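//! AnalyzeBranch follows the usual TargetInstrInfo contract: it returns
//! false when the block's terminators are understood (filling in TBB, FBB
//! and Cond) and true when they cannot be analyzed. The shapes recognized
//! below are fall-through, a lone unconditional or conditional branch, and
//! a conditional branch followed by an unconditional branch.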
bool
SPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                            MachineBasicBlock *&FBB,
                            SmallVectorImpl<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranch(LastInst)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(LastInst)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(1).getMBB();
      Cond.push_back(LastInst->getOperand(0));
      Cond.push_back(LastInst->getOperand(1));
      return false;
    }
    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a conditional and unconditional branch, handle it.
  if (isCondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(1).getMBB();
    Cond.push_back(SecondLastInst->getOperand(0));
    Cond.push_back(SecondLastInst->getOperand(1));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned
SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (!isCondBranch(I) && !isUncondBranch(I)) return 0;

  // Remove the first branch.
  I->eraseFromParent();
  I = MBB.end();
  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranch(I) && !isUncondBranch(I)) return 1;

  // Remove the second branch.
  I->eraseFromParent();
  return 2;
}
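
//! InsertBranch returns the number of branch instructions added. Only the
//! unconditional one-way case is implemented here; the conditional forms
//! below still abort with a diagnostic.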
unsigned
SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SPU branch conditions have two components!");

  // One-way branch.
  if (FBB == 0) {
    if (Cond.empty())   // Unconditional branch
      BuildMI(&MBB, get(SPU::BR)).addMBB(TBB);
    else {              // Conditional branch
      /* BuildMI(&MBB, get(SPU::BRNZ))
         .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB); */
      cerr << "SPUInstrInfo::InsertBranch conditional branch logic needed\n";
      abort();
    }
    return 1;
  }

  // Two-way Conditional Branch.
#if 0
  BuildMI(&MBB, get(SPU::BRNZ))
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
  BuildMI(&MBB, get(SPU::BR)).addMBB(FBB);
#else
  cerr << "SPUInstrInfo::InsertBranch conditional branch logic needed\n";
  abort();
#endif

  return 2;
}