//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Cell SPU implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SPURegisterNames.h"
#include "SPUInstrInfo.h"
#include "SPUInstrBuilder.h"
#include "SPUTargetMachine.h"
#include "SPUGenInstrInfo.inc"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/Streams.h"

using namespace llvm;

//! Predicate for an unconditional branch instruction
inline bool isUncondBranch(const MachineInstr *I) {
  unsigned opc = I->getOpcode();

  return (opc == SPU::BR);
}

//! Predicate for a conditional branch instruction
inline bool isCondBranch(const MachineInstr *I) {
  unsigned opc = I->getOpcode();

  return (opc == SPU::BRNZr32
          || opc == SPU::BRNZv4i32
          || opc == SPU::BRZr32
          || opc == SPU::BRZv4i32
          || opc == SPU::BRHNZr16
          || opc == SPU::BRHNZv8i16
          || opc == SPU::BRHZr16
          || opc == SPU::BRHZv8i16);
}

SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
  : TargetInstrInfoImpl(SPUInsts, sizeof(SPUInsts)/sizeof(SPUInsts[0])),
    TM(tm),
    RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }

/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
const TargetRegisterClass *
SPUInstrInfo::getPointerRegClass() const
{
  return &SPU::R32CRegClass;
}

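//! Return true (setting sourceReg and destReg) if MI is really just a
//! register-to-register move in disguise.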
bool
SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
                          unsigned& sourceReg,
                          unsigned& destReg) const {
  // Primarily, ORI and OR are generated by copyRegToReg. But, there are other
  // cases where we can safely say that what's being done is really a move
  // (see how PowerPC does this -- it's the model for this code too.)
  switch (MI.getOpcode()) {
  default:
    break;
  // OR-immediate (and add-immediate) forms copy a register when the
  // immediate operand is zero:
  case SPU::ORBIr8:
  case SPU::ORHIr16:
  case SPU::ORIr32:
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isImm() &&
           "invalid SPU ORI/ORHI/ORBI/AHI/AI/SFI/SFHI instruction!");
    if (MI.getOperand(2).getImm() == 0) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  case SPU::AIr32:
    assert(MI.getNumOperands() == 3 &&
           "wrong number of operands to AIr32");
    if (MI.getOperand(0).isReg() &&
        MI.getOperand(1).isReg() &&
        (MI.getOperand(2).isImm() &&
         MI.getOperand(2).getImm() == 0)) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  // Moves between a scalar register and the corresponding vector register
  // class (the scalar value lives in the preferred slot):
  case SPU::ORv16i8_i8:
  case SPU::ORv8i16_i16:
  case SPU::ORv4i32_i32:
  case SPU::ORv2i64_i64:
  case SPU::ORv4f32_f32:
  case SPU::ORv2f64_f64:
  case SPU::ORi8_v16i8:
  case SPU::ORi16_v8i16:
  case SPU::ORi32_v4i32:
  case SPU::ORi64_v2i64:
  case SPU::ORf32_v4f32:
  case SPU::ORf64_v2f64: {
    assert(MI.getNumOperands() == 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "invalid SPU OR<type>_<vec> instruction!");
    if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
      sourceReg = MI.getOperand(0).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }
  // Plain register-register OR forms copy when both source operands are the
  // same register:
  case SPU::ORv4i32:
  case SPU::ORr32:
  case SPU::ORr64:
  case SPU::ORf32:
  case SPU::ORf64:
    assert(MI.getNumOperands() == 3 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           MI.getOperand(2).isReg() &&
           "invalid SPU OR(vec|r32|r64|gprc) instruction!");
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      sourceReg = MI.getOperand(1).getReg();
      destReg = MI.getOperand(0).getReg();
      return true;
    }
    break;
  }

  return false;
}

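//! If MI loads from a stack slot, return the destination register and set
//! FrameIndex; handles LQD (immediate-offset) and LQX (register-indexed)
//! loads whose base is a frame index or the stack pointer (R1).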
unsigned
SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                  int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case SPU::LQDv16i8:
  case SPU::LQDr128:
  case SPU::LQDr64:
  case SPU::LQDr32:
  case SPU::LQDr16: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
    if (MOp1.isImm()
        && (MOp2.isFI()
            || (MOp2.isReg() && MOp2.getReg() == SPU::R1))) {
      if (MOp2.isFI())
        FrameIndex = MOp2.getIndex();
      else
        FrameIndex = MOp1.getImm() / SPUFrameInfo::stackSlotSize();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  case SPU::LQXv16i8:
  case SPU::LQXr128:
  case SPU::LQXr64:
  case SPU::LQXr32:
  case SPU::LQXr16:
    if (MI->getOperand(1).isReg() && MI->getOperand(2).isReg()
        && (MI->getOperand(2).getReg() == SPU::R1
            || MI->getOperand(1).getReg() == SPU::R1)) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

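//! If MI stores to a stack slot, return the stored register and set
//! FrameIndex; mirrors isLoadFromStackSlot for the STQD/STQX store forms.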
unsigned
SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                 int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case SPU::STQDv16i8:
  case SPU::STQDr128:
  case SPU::STQDr64:
  case SPU::STQDr32:
  case SPU::STQDr16: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
    if (MOp1.isImm() && MOp2.isFI()) {
      FrameIndex = MOp2.getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  case SPU::STQXv16i8:
  case SPU::STQXr128:
  case SPU::STQXr64:
  case SPU::STQXr32:
  case SPU::STQXr16:
    if (MI->getOperand(1).isReg() && MI->getOperand(2).isReg()
        && (MI->getOperand(2).getReg() == SPU::R1
            || MI->getOperand(1).getReg() == SPU::R1)) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

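//! Emit a register-to-register copy: the narrow integer classes use an
//! OR-immediate with 0 (ORBI/ORHI/ORI); the remaining classes use a
//! three-operand OR of the source register with itself.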
bool SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const
{
  // We support cross register class moves for our aliases, such as R3 in any
  // reg class to any other reg class containing R3. This is required because
  // we instruction select bitconvert i64 -> f64 as a noop for example, so our
  // types have no specific meaning.

  if (DestRC == SPU::R8CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORBIr8), DestReg).addReg(SrcReg).addImm(0);
  } else if (DestRC == SPU::R16CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORHIr16), DestReg).addReg(SrcReg).addImm(0);
  } else if (DestRC == SPU::R32CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORIr32), DestReg).addReg(SrcReg).addImm(0);
  } else if (DestRC == SPU::R32FPRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORf32), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } else if (DestRC == SPU::R64CRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORr64), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } else if (DestRC == SPU::R64FPRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORf64), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } /* else if (DestRC == SPU::GPRCRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORgprc), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } */ else if (DestRC == SPU::VECREGRegisterClass) {
    BuildMI(MBB, MI, get(SPU::ORv4i32), DestReg).addReg(SrcReg)
      .addReg(SrcReg);
  } else {
    // Attempt to copy unknown/unsupported register class!
    return false;
  }

  return true;
}

void
SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned SrcReg, bool isKill, int FrameIdx,
                                  const TargetRegisterClass *RC) const
{
  unsigned opc;
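  // Use a D-form (immediate-offset) quadword store when the frame offset is
  // known to fit, otherwise fall back to the X-form (register-indexed) store.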
  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDv16i8 : SPU::STQXv16i8);
  } else {
    assert(0 && "Unknown regclass!");
  }

  addFrameReference(BuildMI(MBB, MI, get(opc))
                    .addReg(SrcReg, false, false, isKill), FrameIdx);
}

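//! storeRegToAddr is not really implemented for SPU yet; the body below is
//! placeholder code modeled on the PPC backend.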
void SPUInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  cerr << "storeRegToAddr() invoked!\n";

  if (Addr[0].isFI()) {
    /* do what storeRegToStackSlot does here */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::GPRCRegisterClass) {
      /* Opc = PPC::STW; */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::STD; */
    } else if (RC == SPU::R32CRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::STFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::STFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::STVX; */
    } else {
      assert(0 && "Unknown regclass!");
    }
    MachineInstrBuilder MIB = BuildMI(MF, get(Opc))
      .addReg(SrcReg, false, false, isKill);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}

void
SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, int FrameIdx,
                                   const TargetRegisterClass *RC) const
{
  unsigned opc;
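  // Same D-form vs. X-form opcode selection as storeRegToStackSlot, but for
  // quadword loads.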
  bool isValidFrameIdx = (FrameIdx < SPUFrameInfo::maxFrameOffset());
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDv16i8 : SPU::LQXv16i8);
  } else {
    assert(0 && "Unknown regclass in loadRegFromStackSlot!");
  }

  addFrameReference(BuildMI(MBB, MI, get(opc)).addReg(DestReg), FrameIdx);
}

/*!
  \note We are really pessimistic here about what kind of a load we're doing.
 */
void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs)
  const {
  cerr << "loadRegToAddr() invoked!\n";

  if (Addr[0].isFI()) {
    /* do what loadRegFromStackSlot does here... */
  } else {
    unsigned Opc = 0;
    if (RC == SPU::R8CRegisterClass) {
      /* do brilliance here */
    } else if (RC == SPU::R16CRegisterClass) {
      /* Opc = PPC::LWZ; */
    } else if (RC == SPU::R32CRegisterClass) {
    } else if (RC == SPU::R32FPRegisterClass) {
      /* Opc = PPC::LFD; */
    } else if (RC == SPU::R64FPRegisterClass) {
      /* Opc = PPC::LFS; */
    } else if (RC == SPU::VECREGRegisterClass) {
      /* Opc = PPC::LVX; */
    } else if (RC == SPU::GPRCRegisterClass) {
      /* Opc = something else! */
    } else {
      assert(0 && "Unknown regclass!");
    }
    MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
    for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
      MachineOperand &MO = Addr[i];
      if (MO.isReg())
        MIB.addReg(MO.getReg());
      else if (MO.isImm())
        MIB.addImm(MO.getImm());
      else
        MIB.addFrameIndex(MO.getIndex());
    }
    NewMIs.push_back(MIB);
  }
}

/// foldMemoryOperand - SPU, like PPC, can only fold spills into
/// copy instructions, turning them into load/store instructions.
MachineInstr *
SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    int FrameIndex) const
{
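  // Note: everything below is wrapped in a macro guard; unless that macro is
  // defined, spill folding is disabled for SPU and this hook returns 0.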
#if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = 0;

  if ((Opc == SPU::ORr32
       || Opc == SPU::ORv4i32)
      && MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
    if (OpNum == 0) {             // move -> store
      unsigned InReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
        NewMI = addFrameReference(BuildMI(MF, TII.get(SPU::STQDr32))
                                  .addReg(InReg, false, false, isKill),
                                  FrameIndex);
      }
    } else {                      // move -> load
      unsigned OutReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
              ? SPU::STQDr32 : SPU::STQXr32;
      NewMI = addFrameReference(BuildMI(MF, TII.get(Opc))
                      .addReg(OutReg, true, false, false, isDead), FrameIndex);
    }
  }

  return NewMI;
#endif

  return 0;
}

/*!
  \note This code was kiped from PPC. There may be more branch analysis for
  CellSPU than what's currently done here.
 */
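// Returning false means the branch was analyzed (TBB, FBB and Cond are filled
// in as the generic branch-folding code expects); true means "can't analyze".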
bool
SPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                            MachineBasicBlock *&FBB,
                            SmallVectorImpl<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranch(LastInst)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(LastInst)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(1).getMBB();
      Cond.push_back(LastInst->getOperand(0));
      Cond.push_back(LastInst->getOperand(1));
      return false;
    }
    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a conditional and unconditional branch, handle it.
  if (isCondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(1).getMBB();
    Cond.push_back(SecondLastInst->getOperand(0));
    Cond.push_back(SecondLastInst->getOperand(1));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

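//! Remove up to two terminating branches from the end of MBB, returning the
//! number actually deleted (0, 1 or 2).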
unsigned
SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  if (!isCondBranch(I) && !isUncondBranch(I))
    return 0;

  // Remove the first branch.
  I->eraseFromParent();
  I = MBB.end();
  if (I == MBB.begin())
    return 1;

  --I;
  if (!isCondBranch(I) && !isUncondBranch(I))
    return 1;

  // Remove the second branch.
  I->eraseFromParent();
  return 2;
}

unsigned
SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SPU branch conditions have two components!");

  // One-way branch.
  if (FBB == 0) {
    if (Cond.empty())   // Unconditional branch
      BuildMI(&MBB, get(SPU::BR)).addMBB(TBB);
    else {              // Conditional branch
      /* BuildMI(&MBB, get(SPU::BRNZ))
           .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB); */
      cerr << "SPUInstrInfo::InsertBranch conditional branch logic needed\n";
    }
    return 1;
  }

  // Two-way Conditional Branch.
#if 0
  BuildMI(&MBB, get(SPU::BRNZ))
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
  BuildMI(&MBB, get(SPU::BR)).addMBB(FBB);
#else
  cerr << "SPUInstrInfo::InsertBranch conditional branch logic needed\n";
#endif

  return 2;
}