//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "SystemZInstrInfo.h"
#include "SystemZInstrBuilder.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"
// Return a mask with Count low bits set.  The double shift avoids
// undefined behavior for Count == 64 (a single 64-bit shift by 64 is UB).
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}
31 // Reg should be a 32-bit GPR. Return true if it is a high register rather
32 // than a low register.
33 static bool isHighReg(unsigned int Reg) {
34 if (SystemZ::GRH32BitRegClass.contains(Reg))
36 assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
40 SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
41 : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
45 // MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
46 // each having the opcode given by NewOpcode.
47 void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
48 unsigned NewOpcode) const {
49 MachineBasicBlock *MBB = MI->getParent();
50 MachineFunction &MF = *MBB->getParent();
52 // Get two load or store instructions. Use the original instruction for one
53 // of them (arbitarily the second here) and create a clone for the other.
54 MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
55 MBB->insert(MI, EarlierMI);
57 // Set up the two 64-bit registers.
58 MachineOperand &HighRegOp = EarlierMI->getOperand(0);
59 MachineOperand &LowRegOp = MI->getOperand(0);
60 HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
61 LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
63 // The address in the first (high) instruction is already correct.
64 // Adjust the offset in the second (low) instruction.
65 MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
66 MachineOperand &LowOffsetOp = MI->getOperand(2);
67 LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
70 unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
71 unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
72 assert(HighOpcode && LowOpcode && "Both offsets should be in range");
74 EarlierMI->setDesc(get(HighOpcode));
75 MI->setDesc(get(LowOpcode));
78 // Split ADJDYNALLOC instruction MI.
79 void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
80 MachineBasicBlock *MBB = MI->getParent();
81 MachineFunction &MF = *MBB->getParent();
82 MachineFrameInfo *MFFrame = MF.getFrameInfo();
83 MachineOperand &OffsetMO = MI->getOperand(2);
85 uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
86 SystemZMC::CallFrameSize +
88 unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
89 assert(NewOpcode && "No support for huge argument lists yet");
90 MI->setDesc(get(NewOpcode));
91 OffsetMO.setImm(Offset);
94 // MI is an RXY-style pseudo instruction. Replace it with LowOpcode
95 // if the first operand is a low GR32 and HighOpcode if the first operand
97 void SystemZInstrInfo::expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
98 unsigned HighOpcode) const {
99 unsigned Reg = MI->getOperand(0).getReg();
100 unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
101 MI->getOperand(2).getImm());
102 MI->setDesc(get(Opcode));
105 // Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
106 // DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
107 // are low registers, otherwise use RISB[LH]G. Size is the number of bits
108 // taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
109 // KillSrc is true if this move is the last use of SrcReg.
110 void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
111 MachineBasicBlock::iterator MBBI,
112 DebugLoc DL, unsigned DestReg,
113 unsigned SrcReg, unsigned LowLowOpcode,
114 unsigned Size, bool KillSrc) const {
116 bool DestIsHigh = isHighReg(DestReg);
117 bool SrcIsHigh = isHighReg(SrcReg);
118 if (DestIsHigh && SrcIsHigh)
119 Opcode = SystemZ::RISBHH;
120 else if (DestIsHigh && !SrcIsHigh)
121 Opcode = SystemZ::RISBHL;
122 else if (!DestIsHigh && SrcIsHigh)
123 Opcode = SystemZ::RISBLH;
125 BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
126 .addReg(SrcReg, getKillRegState(KillSrc));
129 unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
130 BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
131 .addReg(DestReg, RegState::Undef)
132 .addReg(SrcReg, getKillRegState(KillSrc))
133 .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
136 // If MI is a simple load or store for a frame object, return the register
137 // it loads or stores and set FrameIndex to the index of the frame object.
138 // Return 0 otherwise.
140 // Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
141 static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
143 const MCInstrDesc &MCID = MI->getDesc();
144 if ((MCID.TSFlags & Flag) &&
145 MI->getOperand(1).isFI() &&
146 MI->getOperand(2).getImm() == 0 &&
147 MI->getOperand(3).getReg() == 0) {
148 FrameIndex = MI->getOperand(1).getIndex();
149 return MI->getOperand(0).getReg();
154 unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
155 int &FrameIndex) const {
156 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
159 unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
160 int &FrameIndex) const {
161 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
164 bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
166 int &SrcFrameIndex) const {
167 // Check for MVC 0(Length,FI1),0(FI2)
168 const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
169 if (MI->getOpcode() != SystemZ::MVC ||
170 !MI->getOperand(0).isFI() ||
171 MI->getOperand(1).getImm() != 0 ||
172 !MI->getOperand(3).isFI() ||
173 MI->getOperand(4).getImm() != 0)
176 // Check that Length covers the full slots.
177 int64_t Length = MI->getOperand(2).getImm();
178 unsigned FI1 = MI->getOperand(0).getIndex();
179 unsigned FI2 = MI->getOperand(3).getIndex();
180 if (MFI->getObjectSize(FI1) != Length ||
181 MFI->getObjectSize(FI2) != Length)
184 DestFrameIndex = FI1;
189 bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
190 MachineBasicBlock *&TBB,
191 MachineBasicBlock *&FBB,
192 SmallVectorImpl<MachineOperand> &Cond,
193 bool AllowModify) const {
194 // Most of the code and comments here are boilerplate.
196 // Start from the bottom of the block and work up, examining the
197 // terminator instructions.
198 MachineBasicBlock::iterator I = MBB.end();
199 while (I != MBB.begin()) {
201 if (I->isDebugValue())
204 // Working from the bottom, when we see a non-terminator instruction, we're
206 if (!isUnpredicatedTerminator(I))
209 // A terminator that isn't a branch can't easily be handled by this
214 // Can't handle indirect branches.
215 SystemZII::Branch Branch(getBranchInfo(I));
216 if (!Branch.Target->isMBB())
219 // Punt on compound branches.
220 if (Branch.Type != SystemZII::BranchNormal)
223 if (Branch.CCMask == SystemZ::CCMASK_ANY) {
224 // Handle unconditional branches.
226 TBB = Branch.Target->getMBB();
230 // If the block has any instructions after a JMP, delete them.
231 while (llvm::next(I) != MBB.end())
232 llvm::next(I)->eraseFromParent();
237 // Delete the JMP if it's equivalent to a fall-through.
238 if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
240 I->eraseFromParent();
245 // TBB is used to indicate the unconditinal destination.
246 TBB = Branch.Target->getMBB();
250 // Working from the bottom, handle the first conditional branch.
252 // FIXME: add X86-style branch swap
254 TBB = Branch.Target->getMBB();
255 Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
256 Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
260 // Handle subsequent conditional branches.
261 assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
263 // Only handle the case where all conditional branches branch to the same
265 if (TBB != Branch.Target->getMBB())
268 // If the conditions are the same, we can leave them alone.
269 unsigned OldCCValid = Cond[0].getImm();
270 unsigned OldCCMask = Cond[1].getImm();
271 if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
274 // FIXME: Try combining conditions like X86 does. Should be easy on Z!
281 unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
282 // Most of the code and comments here are boilerplate.
283 MachineBasicBlock::iterator I = MBB.end();
286 while (I != MBB.begin()) {
288 if (I->isDebugValue())
292 if (!getBranchInfo(I).Target->isMBB())
294 // Remove the branch.
295 I->eraseFromParent();
303 bool SystemZInstrInfo::
304 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
305 assert(Cond.size() == 2 && "Invalid condition");
306 Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
311 SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
312 MachineBasicBlock *FBB,
313 const SmallVectorImpl<MachineOperand> &Cond,
315 // In this function we output 32-bit branches, which should always
316 // have enough range. They can be shortened and relaxed by later code
317 // in the pipeline, if desired.
319 // Shouldn't be a fall through.
320 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
321 assert((Cond.size() == 2 || Cond.size() == 0) &&
322 "SystemZ branch conditions have one component!");
325 // Unconditional branch?
326 assert(!FBB && "Unconditional branch with multiple successors!");
327 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
331 // Conditional branch.
333 unsigned CCValid = Cond[0].getImm();
334 unsigned CCMask = Cond[1].getImm();
335 BuildMI(&MBB, DL, get(SystemZ::BRC))
336 .addImm(CCValid).addImm(CCMask).addMBB(TBB);
340 // Two-way Conditional branch. Insert the second branch.
341 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
347 bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
348 unsigned &SrcReg, unsigned &SrcReg2,
349 int &Mask, int &Value) const {
350 assert(MI->isCompare() && "Caller should have checked for a comparison");
352 if (MI->getNumExplicitOperands() == 2 &&
353 MI->getOperand(0).isReg() &&
354 MI->getOperand(1).isImm()) {
355 SrcReg = MI->getOperand(0).getReg();
357 Value = MI->getOperand(1).getImm();
365 // If Reg is a virtual register, return its definition, otherwise return null.
366 static MachineInstr *getDef(unsigned Reg,
367 const MachineRegisterInfo *MRI) {
368 if (TargetRegisterInfo::isPhysicalRegister(Reg))
370 return MRI->getUniqueVRegDef(Reg);
373 // Return true if MI is a shift of type Opcode by Imm bits.
374 static bool isShift(MachineInstr *MI, int Opcode, int64_t Imm) {
375 return (MI->getOpcode() == Opcode &&
376 !MI->getOperand(2).getReg() &&
377 MI->getOperand(3).getImm() == Imm);
380 // If the destination of MI has no uses, delete it as dead.
381 static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
382 if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
383 MI->eraseFromParent();
386 // Compare compares SrcReg against zero. Check whether SrcReg contains
387 // the result of an IPM sequence whose input CC survives until Compare,
388 // and whether Compare is therefore redundant. Delete it and return
390 static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
391 const MachineRegisterInfo *MRI,
392 const TargetRegisterInfo *TRI) {
393 MachineInstr *LGFR = 0;
394 MachineInstr *RLL = getDef(SrcReg, MRI);
395 if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
397 RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
399 if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
402 MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
403 if (!SRL || !isShift(SRL, SystemZ::SRL, 28))
406 MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
407 if (!IPM || IPM->getOpcode() != SystemZ::IPM)
410 // Check that there are no assignments to CC between the IPM and Compare,
411 if (IPM->getParent() != Compare->getParent())
413 MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
414 for (++MBBI; MBBI != MBBE; ++MBBI) {
415 MachineInstr *MI = MBBI;
416 if (MI->modifiesRegister(SystemZ::CC, TRI))
420 Compare->eraseFromParent();
422 eraseIfDead(LGFR, MRI);
423 eraseIfDead(RLL, MRI);
424 eraseIfDead(SRL, MRI);
425 eraseIfDead(IPM, MRI);
431 SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
432 unsigned SrcReg, unsigned SrcReg2,
434 const MachineRegisterInfo *MRI) const {
435 assert(!SrcReg2 && "Only optimizing constant comparisons so far");
436 bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
439 removeIPMBasedCompare(Compare, SrcReg, MRI, TM.getRegisterInfo()))
444 // If Opcode is a move that has a conditional variant, return that variant,
445 // otherwise return 0.
446 static unsigned getConditionalMove(unsigned Opcode) {
448 case SystemZ::LR: return SystemZ::LOCR;
449 case SystemZ::LGR: return SystemZ::LOCGR;
454 bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
455 unsigned Opcode = MI->getOpcode();
456 if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
457 getConditionalMove(Opcode))
462 bool SystemZInstrInfo::
463 isProfitableToIfCvt(MachineBasicBlock &MBB,
464 unsigned NumCycles, unsigned ExtraPredCycles,
465 const BranchProbability &Probability) const {
466 // For now only convert single instructions.
467 return NumCycles == 1;
470 bool SystemZInstrInfo::
471 isProfitableToIfCvt(MachineBasicBlock &TMBB,
472 unsigned NumCyclesT, unsigned ExtraPredCyclesT,
473 MachineBasicBlock &FMBB,
474 unsigned NumCyclesF, unsigned ExtraPredCyclesF,
475 const BranchProbability &Probability) const {
476 // For now avoid converting mutually-exclusive cases.
480 bool SystemZInstrInfo::
481 PredicateInstruction(MachineInstr *MI,
482 const SmallVectorImpl<MachineOperand> &Pred) const {
483 assert(Pred.size() == 2 && "Invalid condition");
484 unsigned CCValid = Pred[0].getImm();
485 unsigned CCMask = Pred[1].getImm();
486 assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
487 unsigned Opcode = MI->getOpcode();
488 if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
489 if (unsigned CondOpcode = getConditionalMove(Opcode)) {
490 MI->setDesc(get(CondOpcode));
491 MachineInstrBuilder(*MI->getParent()->getParent(), MI)
492 .addImm(CCValid).addImm(CCMask)
493 .addReg(SystemZ::CC, RegState::Implicit);;
501 SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
502 MachineBasicBlock::iterator MBBI, DebugLoc DL,
503 unsigned DestReg, unsigned SrcReg,
504 bool KillSrc) const {
505 // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
506 if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
507 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
508 RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
509 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
510 RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
514 if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
515 emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc);
519 // Everything else needs only one instruction.
521 if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
522 Opcode = SystemZ::LGR;
523 else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
524 Opcode = SystemZ::LER;
525 else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
526 Opcode = SystemZ::LDR;
527 else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
528 Opcode = SystemZ::LXR;
530 llvm_unreachable("Impossible reg-to-reg copy");
532 BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
533 .addReg(SrcReg, getKillRegState(KillSrc));
537 SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
538 MachineBasicBlock::iterator MBBI,
539 unsigned SrcReg, bool isKill,
541 const TargetRegisterClass *RC,
542 const TargetRegisterInfo *TRI) const {
543 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
545 // Callers may expect a single instruction, so keep 128-bit moves
546 // together for now and lower them after register allocation.
547 unsigned LoadOpcode, StoreOpcode;
548 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
549 addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
550 .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
554 SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
555 MachineBasicBlock::iterator MBBI,
556 unsigned DestReg, int FrameIdx,
557 const TargetRegisterClass *RC,
558 const TargetRegisterInfo *TRI) const {
559 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
561 // Callers may expect a single instruction, so keep 128-bit moves
562 // together for now and lower them after register allocation.
563 unsigned LoadOpcode, StoreOpcode;
564 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
565 addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
569 // Return true if MI is a simple load or store with a 12-bit displacement
570 // and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
571 static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
572 const MCInstrDesc &MCID = MI->getDesc();
573 return ((MCID.TSFlags & Flag) &&
574 isUInt<12>(MI->getOperand(2).getImm()) &&
575 MI->getOperand(3).getReg() == 0);
namespace {
  // Describes an AND IMMEDIATE as an operation on RegSize-bit registers
  // whose immediate covers ImmSize bits starting at bit ImmLSB.  A
  // default-constructed LogicOp converts to false ("no match").
  struct LogicOp {
    LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
    LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
      : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

    operator bool() const { return RegSize; }

    unsigned RegSize, ImmLSB, ImmSize;
  };
} // end anonymous namespace
590 static LogicOp interpretAndImmediate(unsigned Opcode) {
592 case SystemZ::NILL: return LogicOp(32, 0, 16);
593 case SystemZ::NILH: return LogicOp(32, 16, 16);
594 case SystemZ::NILL64: return LogicOp(64, 0, 16);
595 case SystemZ::NILH64: return LogicOp(64, 16, 16);
596 case SystemZ::NIHL: return LogicOp(64, 32, 16);
597 case SystemZ::NIHH: return LogicOp(64, 48, 16);
598 case SystemZ::NILF: return LogicOp(32, 0, 32);
599 case SystemZ::NILF64: return LogicOp(64, 0, 32);
600 case SystemZ::NIHF: return LogicOp(64, 32, 32);
601 default: return LogicOp();
605 // Used to return from convertToThreeAddress after replacing two-address
606 // instruction OldMI with three-address instruction NewMI.
607 static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
611 unsigned NumOps = OldMI->getNumOperands();
612 for (unsigned I = 1; I < NumOps; ++I) {
613 MachineOperand &Op = OldMI->getOperand(I);
614 if (Op.isReg() && Op.isKill())
615 LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
622 SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
623 MachineBasicBlock::iterator &MBBI,
624 LiveVariables *LV) const {
625 MachineInstr *MI = MBBI;
626 MachineBasicBlock *MBB = MI->getParent();
628 unsigned Opcode = MI->getOpcode();
629 unsigned NumOps = MI->getNumOperands();
631 // Try to convert something like SLL into SLLK, if supported.
632 // We prefer to keep the two-operand form where possible both
633 // because it tends to be shorter and because some instructions
634 // have memory forms that can be used during spilling.
635 if (TM.getSubtargetImpl()->hasDistinctOps()) {
636 int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
637 if (ThreeOperandOpcode >= 0) {
638 MachineOperand &Dest = MI->getOperand(0);
639 MachineOperand &Src = MI->getOperand(1);
640 MachineInstrBuilder MIB =
641 BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode))
643 // Keep the kill state, but drop the tied flag.
644 MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
645 // Keep the remaining operands as-is.
646 for (unsigned I = 2; I < NumOps; ++I)
647 MIB.addOperand(MI->getOperand(I));
648 return finishConvertToThreeAddress(MI, MIB, LV);
652 // Try to convert an AND into an RISBG-type instruction.
653 if (LogicOp And = interpretAndImmediate(Opcode)) {
655 if (And.RegSize == 64)
656 NewOpcode = SystemZ::RISBG;
657 else if (TM.getSubtargetImpl()->hasHighWord())
658 NewOpcode = SystemZ::RISBLL;
660 // We can't use RISBG for 32-bit operations because it clobbers the
661 // high word of the destination too.
664 uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
665 // AND IMMEDIATE leaves the other bits of the register unchanged.
666 Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
668 if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
669 if (NewOpcode == SystemZ::RISBLL) {
673 MachineOperand &Dest = MI->getOperand(0);
674 MachineOperand &Src = MI->getOperand(1);
675 MachineInstrBuilder MIB =
676 BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
677 .addOperand(Dest).addReg(0)
678 .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
679 .addImm(Start).addImm(End + 128).addImm(0);
680 return finishConvertToThreeAddress(MI, MIB, LV);
688 SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
690 const SmallVectorImpl<unsigned> &Ops,
691 int FrameIndex) const {
692 const MachineFrameInfo *MFI = MF.getFrameInfo();
693 unsigned Size = MFI->getObjectSize(FrameIndex);
695 // Eary exit for cases we don't care about
699 unsigned OpNum = Ops[0];
700 assert(Size == MF.getRegInfo()
701 .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
702 "Invalid size combination");
704 unsigned Opcode = MI->getOpcode();
705 if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
706 bool Op0IsGPR = (Opcode == SystemZ::LGDR);
707 bool Op1IsGPR = (Opcode == SystemZ::LDGR);
708 // If we're spilling the destination of an LDGR or LGDR, store the
709 // source register instead.
711 unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
712 return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
713 .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
714 .addImm(0).addReg(0);
716 // If we're spilling the source of an LDGR or LGDR, load the
717 // destination register instead.
719 unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
720 unsigned Dest = MI->getOperand(0).getReg();
721 return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
722 .addFrameIndex(FrameIndex).addImm(0).addReg(0);
726 // Look for cases where the source of a simple store or the destination
727 // of a simple load is being spilled. Try to use MVC instead.
729 // Although MVC is in practice a fast choice in these cases, it is still
730 // logically a bytewise copy. This means that we cannot use it if the
731 // load or store is volatile. We also wouldn't be able to use MVC if
732 // the two memories partially overlap, but that case cannot occur here,
733 // because we know that one of the memories is a full frame index.
735 // For performance reasons, we also want to avoid using MVC if the addresses
736 // might be equal. We don't worry about that case here, because spill slot
737 // coloring happens later, and because we have special code to remove
738 // MVCs that turn out to be redundant.
739 if (OpNum == 0 && MI->hasOneMemOperand()) {
740 MachineMemOperand *MMO = *MI->memoperands_begin();
741 if (MMO->getSize() == Size && !MMO->isVolatile()) {
742 // Handle conversion of loads.
743 if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
744 return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
745 .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
746 .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
749 // Handle conversion of stores.
750 if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
751 return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
752 .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
753 .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
759 // If the spilled operand is the final one, try to change <INSN>R
761 int MemOpcode = SystemZ::getMemOpcode(Opcode);
762 if (MemOpcode >= 0) {
763 unsigned NumOps = MI->getNumExplicitOperands();
764 if (OpNum == NumOps - 1) {
765 const MCInstrDesc &MemDesc = get(MemOpcode);
766 uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
767 assert(AccessBytes != 0 && "Size of access should be known");
768 assert(AccessBytes <= Size && "Access outside the frame index");
769 uint64_t Offset = Size - AccessBytes;
770 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
771 for (unsigned I = 0; I < OpNum; ++I)
772 MIB.addOperand(MI->getOperand(I));
773 MIB.addFrameIndex(FrameIndex).addImm(Offset);
774 if (MemDesc.TSFlags & SystemZII::HasIndex)
784 SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
785 const SmallVectorImpl<unsigned> &Ops,
786 MachineInstr* LoadMI) const {
791 SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
792 switch (MI->getOpcode()) {
794 splitMove(MI, SystemZ::LG);
798 splitMove(MI, SystemZ::STG);
802 splitMove(MI, SystemZ::LD);
806 splitMove(MI, SystemZ::STD);
810 expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
814 expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
817 case SystemZ::LLCMux:
818 expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
821 case SystemZ::LLHMux:
822 expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
826 expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
829 case SystemZ::STCMux:
830 expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
833 case SystemZ::STHMux:
834 expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
838 expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
841 case SystemZ::ADJDYNALLOC:
842 splitAdjDynAlloc(MI);
850 uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
851 if (MI->getOpcode() == TargetOpcode::INLINEASM) {
852 const MachineFunction *MF = MI->getParent()->getParent();
853 const char *AsmStr = MI->getOperand(0).getSymbolName();
854 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
856 return MI->getDesc().getSize();
860 SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
861 switch (MI->getOpcode()) {
865 return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
866 SystemZ::CCMASK_ANY, &MI->getOperand(0));
870 return SystemZII::Branch(SystemZII::BranchNormal,
871 MI->getOperand(0).getImm(),
872 MI->getOperand(1).getImm(), &MI->getOperand(2));
875 return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
876 SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
879 return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
880 SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
884 return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
885 MI->getOperand(2).getImm(), &MI->getOperand(3));
889 return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
890 MI->getOperand(2).getImm(), &MI->getOperand(3));
894 return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
895 MI->getOperand(2).getImm(), &MI->getOperand(3));
899 return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
900 MI->getOperand(2).getImm(), &MI->getOperand(3));
903 llvm_unreachable("Unrecognized branch opcode");
907 void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
908 unsigned &LoadOpcode,
909 unsigned &StoreOpcode) const {
910 if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
911 LoadOpcode = SystemZ::L;
912 StoreOpcode = SystemZ::ST;
913 } else if (RC == &SystemZ::GRH32BitRegClass) {
914 LoadOpcode = SystemZ::LFH;
915 StoreOpcode = SystemZ::STFH;
916 } else if (RC == &SystemZ::GRX32BitRegClass) {
917 LoadOpcode = SystemZ::LMux;
918 StoreOpcode = SystemZ::STMux;
919 } else if (RC == &SystemZ::GR64BitRegClass ||
920 RC == &SystemZ::ADDR64BitRegClass) {
921 LoadOpcode = SystemZ::LG;
922 StoreOpcode = SystemZ::STG;
923 } else if (RC == &SystemZ::GR128BitRegClass ||
924 RC == &SystemZ::ADDR128BitRegClass) {
925 LoadOpcode = SystemZ::L128;
926 StoreOpcode = SystemZ::ST128;
927 } else if (RC == &SystemZ::FP32BitRegClass) {
928 LoadOpcode = SystemZ::LE;
929 StoreOpcode = SystemZ::STE;
930 } else if (RC == &SystemZ::FP64BitRegClass) {
931 LoadOpcode = SystemZ::LD;
932 StoreOpcode = SystemZ::STD;
933 } else if (RC == &SystemZ::FP128BitRegClass) {
934 LoadOpcode = SystemZ::LX;
935 StoreOpcode = SystemZ::STX;
937 llvm_unreachable("Unsupported regclass to load or store");
940 unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
941 int64_t Offset) const {
942 const MCInstrDesc &MCID = get(Opcode);
943 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
944 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
945 // Get the instruction to use for unsigned 12-bit displacements.
946 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
947 if (Disp12Opcode >= 0)
950 // All address-related instructions can use unsigned 12-bit
954 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
955 // Get the instruction to use for signed 20-bit displacements.
956 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
957 if (Disp20Opcode >= 0)
960 // Check whether Opcode allows signed 20-bit displacements.
961 if (MCID.TSFlags & SystemZII::Has20BitOffset)
967 unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
969 case SystemZ::L: return SystemZ::LT;
970 case SystemZ::LY: return SystemZ::LT;
971 case SystemZ::LG: return SystemZ::LTG;
972 case SystemZ::LGF: return SystemZ::LTGF;
973 case SystemZ::LR: return SystemZ::LTR;
974 case SystemZ::LGFR: return SystemZ::LTGFR;
975 case SystemZ::LGR: return SystemZ::LTGR;
976 case SystemZ::LER: return SystemZ::LTEBR;
977 case SystemZ::LDR: return SystemZ::LTDBR;
978 case SystemZ::LXR: return SystemZ::LTXBR;
983 // Return true if Mask matches the regexp 0*1+0*, given that zero masks
984 // have already been filtered out. Store the first set bit in LSB and
985 // the number of set bits in Length if so.
986 static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
987 unsigned First = findFirstSet(Mask);
988 uint64_t Top = (Mask >> First) + 1;
989 if ((Top & -Top) == Top) {
991 Length = findFirstSet(Top);
997 bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
998 unsigned &Start, unsigned &End) const {
999 // Reject trivial all-zero masks.
1003 // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
1004 // the msb and End specifies the index of the lsb.
1005 unsigned LSB, Length;
1006 if (isStringOfOnes(Mask, LSB, Length)) {
1007 Start = 63 - (LSB + Length - 1);
1012 // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
1013 // of the low 1s and End specifies the lsb of the high 1s.
1014 if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
1015 assert(LSB > 0 && "Bottom bit must be set");
1016 assert(LSB + Length < BitSize && "Top bit must be set");
1017 Start = 63 - (LSB - 1);
1018 End = 63 - (LSB + Length);
1025 unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
1026 const MachineInstr *MI) const {
1029 return SystemZ::CRJ;
1031 return SystemZ::CGRJ;
1033 return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
1035 return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
1037 return SystemZ::CLRJ;
1039 return SystemZ::CLGRJ;
1041 return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLIJ : 0;
1042 case SystemZ::CLGFI:
1043 return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLGIJ : 0;
1049 void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
1050 MachineBasicBlock::iterator MBBI,
1051 unsigned Reg, uint64_t Value) const {
1052 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1054 if (isInt<16>(Value))
1055 Opcode = SystemZ::LGHI;
1056 else if (SystemZ::isImmLL(Value))
1057 Opcode = SystemZ::LLILL;
1058 else if (SystemZ::isImmLH(Value)) {
1059 Opcode = SystemZ::LLILH;
1062 assert(isInt<32>(Value) && "Huge values not handled yet");
1063 Opcode = SystemZ::LGFI;
1065 BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);