1 //===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Mips32/64 implementation of TargetFrameLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "MipsSEFrameLowering.h"
15 #include "MCTargetDesc/MipsBaseInfo.h"
16 #include "MipsAnalyzeImmediate.h"
17 #include "MipsMachineFunction.h"
18 #include "MipsSEInstrInfo.h"
19 #include "MipsSubtarget.h"
20 #include "llvm/ADT/StringSwitch.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineModuleInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/RegisterScavenging.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Target/TargetOptions.h"
// Shorthand for an instruction iterator within a MachineBasicBlock, used by
// the pseudo-expansion helpers below.
35 typedef MachineBasicBlock::iterator Iter;
// Map an accumulator register Src to the (MFHI, MFLO) opcode pair used to
// read its hi/lo halves into GPRs. Returns {0, 0} when Src is not in any
// accumulator register class, which callers use as a "not an ACC" signal.
37 static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
38 if (Mips::ACC64RegClass.contains(Src))
39 return std::make_pair((unsigned)Mips::PseudoMFHI,
40 (unsigned)Mips::PseudoMFLO);
// DSP 64-bit accumulators use the real DSP move instructions rather than
// the PseudoMFHI/PseudoMFLO pseudos.
42 if (Mips::ACC64DSPRegClass.contains(Src))
43 return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);
// 128-bit accumulators (64-bit targets) use the 64-bit pseudos.
45 if (Mips::ACC128RegClass.contains(Src))
46 return std::make_pair((unsigned)Mips::PseudoMFHI64,
47 (unsigned)Mips::PseudoMFLO64);
49 return std::make_pair(0, 0);
52 /// Helper class to expand pseudos.
// NOTE(review): this listing elides the `class ExpandPseudo {` header line,
// the access specifiers, the `bool expand();` declaration and the
// `MachineFunction &MF;` member — confirm against the full source.
55 ExpandPseudo(MachineFunction &MF);
// Expand a single instruction if it is one of the handled pseudos; returns
// true when an expansion was performed (see expandInstr's switch below).
59 bool expandInstr(MachineBasicBlock &MBB, Iter I);
// Load/store of a DSP condition-code register via an intermediate GPR.
60 void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
61 void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
// Load/store of an accumulator (hi/lo pair), RegSize bytes per half.
62 void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
63 void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
64 unsigned MFLoOpc, unsigned RegSize);
// COPY expansion; dispatches to expandCopyACC for accumulator sources.
65 bool expandCopy(MachineBasicBlock &MBB, Iter I);
66 bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
// Spill/reload based expansions for f64 build/extract when mthc1/mfhc1
// cannot be used (FPXX without MTHC1, or FP64 with nooddspreg).
68 bool expandBuildPairF64(MachineBasicBlock &MBB,
69 MachineBasicBlock::iterator I, bool FP64) const;
70 bool expandExtractElementF64(MachineBasicBlock &MBB,
71 MachineBasicBlock::iterator I, bool FP64) const;
// Cached per-function references, initialized by the constructor.
74 MachineRegisterInfo &MRI;
75 const MipsSubtarget &Subtarget;
76 const MipsSEInstrInfo &TII;
77 const MipsRegisterInfo &RegInfo;
// Construct the expander for a single function, caching the register info,
// subtarget and SE instruction info so the expand* helpers don't re-fetch
// them per instruction.
81 ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
82 : MF(MF_), MRI(MF.getRegInfo()),
83 Subtarget(static_cast<const MipsSubtarget &>(MF.getSubtarget())),
84 TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
85 RegInfo(*Subtarget.getRegisterInfo()) {}
// Walk every instruction of every block and expand handled pseudos.
// Returns true if at least one instruction was expanded (the `return
// Expanded;` line is elided from this listing).
87 bool ExpandPseudo::expand() {
88 bool Expanded = false;
90 for (MachineFunction::iterator BB = MF.begin(), BBEnd = MF.end();
// I is advanced before expandInstr can erase the current instruction.
92 for (Iter I = BB->begin(), End = BB->end(); I != End;)
93 Expanded |= expandInstr(*BB, I++);
// Dispatch on the opcode of I and perform the matching expansion. Returns
// true when I was replaced. NOTE(review): the `break;` statements, the
// erase of the original instruction and the default/return paths are
// elided from this listing — confirm fall-through behavior in full source.
98 bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
99 switch(I->getOpcode()) {
100 case Mips::LOAD_CCOND_DSP:
101 expandLoadCCond(MBB, I);
103 case Mips::STORE_CCOND_DSP:
104 expandStoreCCond(MBB, I);
// 64-bit accumulators load/store two 4-byte halves ...
106 case Mips::LOAD_ACC64:
107 case Mips::LOAD_ACC64DSP:
108 expandLoadACC(MBB, I, 4);
// ... 128-bit accumulators two 8-byte halves.
110 case Mips::LOAD_ACC128:
111 expandLoadACC(MBB, I, 8);
113 case Mips::STORE_ACC64:
114 expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
116 case Mips::STORE_ACC64DSP:
117 expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
119 case Mips::STORE_ACC128:
120 expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
// f64 build/extract pseudos expand conditionally; the expand* helpers
// return false when no spill/reload sequence is required.
122 case Mips::BuildPairF64:
123 if (expandBuildPairF64(MBB, I, false))
126 case Mips::BuildPairF64_64:
127 if (expandBuildPairF64(MBB, I, true))
130 case Mips::ExtractElementF64:
131 if (expandExtractElementF64(MBB, I, false))
134 case Mips::ExtractElementF64_64:
135 if (expandExtractElementF64(MBB, I, true))
138 case TargetOpcode::COPY:
139 if (!expandCopy(MBB, I))
// Expand LOAD_CCOND_DSP: reload a 4-byte spill slot into a scratch virtual
// GPR, then COPY it into the DSP condition-code register Dst.
150 void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
154 assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
156 const TargetRegisterClass *RC = RegInfo.intRegClass(4);
157 unsigned VR = MRI.createVirtualRegister(RC);
158 unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
160 TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
// Kill VR — it is dead after this single use.
161 BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
162 .addReg(VR, RegState::Kill);
// Expand STORE_CCOND_DSP: COPY the DSP condition-code register into a
// scratch virtual GPR, then store that GPR to the frame slot.
165 void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
169 assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
171 const TargetRegisterClass *RC = RegInfo.intRegClass(4);
172 unsigned VR = MRI.createVirtualRegister(RC);
173 unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
// Propagate the original kill state of Src onto the COPY.
175 BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
176 .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
177 TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
// Expand LOAD_ACC*: reload the lo half from FI and the hi half from
// FI + RegSize through two scratch GPRs, then COPY each into the matching
// sub-register of the accumulator Dst.
180 void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
187 assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
189 const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
190 unsigned VR0 = MRI.createVirtualRegister(RC);
191 unsigned VR1 = MRI.createVirtualRegister(RC);
192 unsigned Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
193 unsigned Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
194 unsigned Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
195 DebugLoc DL = I->getDebugLoc();
196 const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);
// lo half lives at offset 0, hi half at offset RegSize.
198 TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
199 BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
200 TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
201 BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
// Expand STORE_ACC*: read the accumulator halves with the given MFLO/MFHI
// opcodes into scratch GPRs and store them at FI (lo) and FI + RegSize (hi).
204 void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
205 unsigned MFHiOpc, unsigned MFLoOpc,
210 // store $vr1, FI + 4
212 assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());
214 const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
215 unsigned VR0 = MRI.createVirtualRegister(RC);
216 unsigned VR1 = MRI.createVirtualRegister(RC);
217 unsigned Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
218 unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
219 DebugLoc DL = I->getDebugLoc();
// The kill flag goes on the *second* (MFHi) read of Src, the last use.
221 BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
222 TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
223 BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
224 TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
// Expand a COPY whose source is an accumulator register. getMFHiLoOpc
// returns {0, 0} for non-accumulator sources; the early-return for that
// case is elided from this listing.
227 bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
228 unsigned Src = I->getOperand(1).getReg();
229 std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);
234 return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
// Expand an accumulator-to-accumulator COPY: read Src's lo/hi halves via
// MFLoOpc/MFHiOpc into scratch GPRs, then COPY them into Dst's sub_lo and
// sub_hi sub-registers.
237 bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
238 unsigned MFHiOpc, unsigned MFLoOpc) {
244 unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
// Each half is half the size of the full accumulator register class.
245 unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
246 const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
247 unsigned VR0 = MRI.createVirtualRegister(RC);
248 unsigned VR1 = MRI.createVirtualRegister(RC);
249 unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
250 unsigned DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
251 unsigned DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
252 DebugLoc DL = I->getDebugLoc();
// Kill flag on the second (last) read of Src only.
254 BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
255 BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
256 .addReg(VR0, RegState::Kill);
257 BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
258 BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
259 .addReg(VR1, RegState::Kill);
263 /// This method expands the same instruction that MipsSEInstrInfo::
264 /// expandBuildPairF64 does, for the case when ABI is fpxx and mthc1 is not
265 /// available and the case where the ABI is FP64A. It is implemented here
266 /// because frame indexes are eliminated before MipsSEInstrInfo::
267 /// expandBuildPairF64 is called.
268 bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
269 MachineBasicBlock::iterator I,
271 // For fpxx and when mthc1 is not available, use:
272 // spill + reload via ldc1
274 // The case where dmtc1 is available doesn't need to be handled here
275 // because it never creates a BuildPairF64 node.
277 // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
278 // for odd-numbered double precision values (because the lower 32-bits is
279 // transferred with mtc1 which is redirected to the upper half of the even
280 // register). Unfortunately, we have to make this decision before register
281 // allocation so for now we use a spill/reload sequence for all
282 // double-precision values in regardless of being an odd/even register.
283 if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
284 (FP64 && !Subtarget.useOddSPReg())) {
285 unsigned DstReg = I->getOperand(0).getReg();
286 unsigned LoReg = I->getOperand(1).getReg();
287 unsigned HiReg = I->getOperand(2).getReg();
289 // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
290 // the cases where mthc1 is not available). 64-bit architectures and
291 // MIPS32r2 or later can use FGR64 though.
292 assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
293 !Subtarget.isFP64bit());
// Spill the two 32-bit halves as GPR32, reload as one f64 (RC2).
295 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
296 const TargetRegisterClass *RC2 =
297 FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
299 // We re-use the same spill slot each time so that the stack frame doesn't
300 // grow too much in functions with a large number of moves.
301 int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
// Big-endian places the hi word at the lower address.
302 if (!Subtarget.isLittle())
303 std::swap(LoReg, HiReg);
// NOTE(review): the store offsets (0 and 4) and the erase/return lines
// are elided from this listing — confirm against the full source.
304 TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
306 TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
308 TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
315 /// This method expands the same instruction that MipsSEInstrInfo::
316 /// expandExtractElementF64 does, for the case when ABI is fpxx and mfhc1 is not
317 /// available and the case where the ABI is FP64A. It is implemented here
318 /// because frame indexes are eliminated before MipsSEInstrInfo::
319 /// expandExtractElementF64 is called.
320 bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
321 MachineBasicBlock::iterator I,
323 const MachineOperand &Op1 = I->getOperand(1);
324 const MachineOperand &Op2 = I->getOperand(2);
// Extracting from an undef source produces an undef result; emit an
// IMPLICIT_DEF instead of a real spill/reload.
326 if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
327 unsigned DstReg = I->getOperand(0).getReg();
328 BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
332 // For fpxx and when mfhc1 is not available, use:
333 // spill + reload via ldc1
335 // The case where dmfc1 is available doesn't need to be handled here
336 // because it never creates a ExtractElementF64 node.
338 // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
339 // for odd-numbered double precision values (because the lower 32-bits is
340 // transferred with mfc1 which is redirected to the upper half of the even
341 // register). Unfortunately, we have to make this decision before register
342 // allocation so for now we use a spill/reload sequence for all
343 // double-precision values in regardless of being an odd/even register.
345 if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) ||
346 (FP64 && !Subtarget.useOddSPReg())) {
347 unsigned DstReg = I->getOperand(0).getReg();
348 unsigned SrcReg = Op1.getReg();
349 unsigned N = Op2.getImm();
// N selects the 32-bit half; endianness decides which half sits at
// which 4-byte offset within the spill slot.
350 int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));
352 // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
353 // the cases where mfhc1 is not available). 64-bit architectures and
354 // MIPS32r2 or later can use FGR64 though.
355 assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
356 !Subtarget.isFP64bit());
// Spill the whole f64 (RC), reload one 32-bit half as GPR32 (RC2).
358 const TargetRegisterClass *RC =
359 FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
360 const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;
362 // We re-use the same spill slot each time so that the stack frame doesn't
363 // grow too much in functions with a large number of moves.
364 int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
365 TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
366 TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
// Forward the subtarget and its stack alignment to the common Mips frame
// lowering base class.
373 MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
374 : MipsFrameLowering(STI, STI.stackAlignment()) {}
// Emit the function prologue into the entry block: allocate the stack
// frame, emit CFI for the CFA and callee saves, spill eh_return data
// registers, set up the frame pointer and realign/establish the base
// pointer when required. NOTE(review): this listing elides several guard
// conditions (e.g. the `if` before the FP move and the base-pointer setup)
// and the `dl` DebugLoc definition — confirm against the full source.
376 void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
377 MachineBasicBlock &MBB) const {
378 assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
379 MachineFrameInfo *MFI = MF.getFrameInfo();
380 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
382 const MipsSEInstrInfo &TII =
383 *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
384 const MipsRegisterInfo &RegInfo =
385 *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());
387 MachineBasicBlock::iterator MBBI = MBB.begin();
// ABI-dependent register/opcode selection (O32 vs N32/N64).
389 MipsABIInfo ABI = STI.getABI();
390 unsigned SP = ABI.GetStackPtr();
391 unsigned FP = ABI.GetFramePtr();
392 unsigned ZERO = ABI.GetNullPtr();
393 unsigned MOVE = ABI.GetGPRMoveOp();
394 unsigned ADDiu = ABI.GetPtrAddiuOp();
395 unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;
397 const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
398 &Mips::GPR64RegClass : &Mips::GPR32RegClass;
400 // First, compute final stack size.
401 uint64_t StackSize = MFI->getStackSize();
403 // No need to allocate space on the stack.
404 if (StackSize == 0 && !MFI->adjustsStack()) return;
406 MachineModuleInfo &MMI = MF.getMMI();
407 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
408 MachineLocation DstML, SrcML;
// Allocate the frame: SP -= StackSize.
411 TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);
413 // emit ".cfi_def_cfa_offset StackSize"
414 unsigned CFIIndex = MMI.addFrameInst(
415 MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
416 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
417 .addCFIIndex(CFIIndex);
// ISR functions need extra Coprocessor 0 save code (see stub below).
419 if (MF.getFunction()->hasFnAttribute("interrupt"))
420 emitInterruptPrologueStub(MF, MBB);
422 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
425 // Find the instruction past the last instruction that saves a callee-saved
426 // register to the stack.
427 for (unsigned i = 0; i < CSI.size(); ++i)
430 // Iterate over list of callee-saved registers and emit .cfi_offset
432 for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
433 E = CSI.end(); I != E; ++I) {
434 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
435 unsigned Reg = I->getReg();
437 // If Reg is a double precision register, emit two cfa_offsets,
438 // one for each of the paired single precision registers.
439 if (Mips::AFGR64RegClass.contains(Reg)) {
441 MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
443 MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);
// On big-endian the halves swap (the guarding condition is elided here).
446 std::swap(Reg0, Reg1);
448 unsigned CFIIndex = MMI.addFrameInst(
449 MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
450 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
451 .addCFIIndex(CFIIndex);
453 CFIIndex = MMI.addFrameInst(
454 MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
455 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
456 .addCFIIndex(CFIIndex);
457 } else if (Mips::FGR64RegClass.contains(Reg)) {
// FGR64 maps to two consecutive DWARF register numbers.
458 unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
459 unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;
462 std::swap(Reg0, Reg1);
464 unsigned CFIIndex = MMI.addFrameInst(
465 MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
466 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
467 .addCFIIndex(CFIIndex);
469 CFIIndex = MMI.addFrameInst(
470 MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
471 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
472 .addCFIIndex(CFIIndex);
474 // Reg is either in GPR32 or FGR32.
475 unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
476 nullptr, MRI->getDwarfRegNum(Reg, 1), Offset));
477 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
478 .addCFIIndex(CFIIndex);
// Spill the four eh_return data registers when the function calls
// llvm.eh.return, and describe them with .cfi_offset.
483 if (MipsFI->callsEhReturn()) {
484 // Insert instructions that spill eh data registers.
485 for (int I = 0; I < 4; ++I) {
486 if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
487 MBB.addLiveIn(ABI.GetEhDataReg(I));
488 TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
489 MipsFI->getEhDataRegFI(I), RC, &RegInfo);
492 // Emit .cfi_offset directives for eh data registers.
493 for (int I = 0; I < 4; ++I) {
494 int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
495 unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
496 unsigned CFIIndex = MMI.addFrameInst(
497 MCCFIInstruction::createOffset(nullptr, Reg, Offset));
498 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
499 .addCFIIndex(CFIIndex);
503 // if framepointer enabled, set it to point to the stack pointer.
505 // Insert instruction "move $fp, $sp" at this location.
506 BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
507 .setMIFlag(MachineInstr::FrameSetup);
509 // emit ".cfi_def_cfa_register $fp"
510 unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createDefCfaRegister(
511 nullptr, MRI->getDwarfRegNum(FP, true)));
512 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
513 .addCFIIndex(CFIIndex);
// Over-aligned frames: clear the low bits of SP with an AND mask.
515 if (RegInfo.needsStackRealignment(MF)) {
516 // addiu $Reg, $zero, -MaxAlignment
517 // andi $sp, $sp, $Reg
518 unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
// The mask must fit an ADDiu 16-bit signed immediate.
519 assert(isInt<16>(MFI->getMaxAlignment()) &&
520 "Function's alignment size requirement is not supported.");
521 int MaxAlign = - (signed) MFI->getMaxAlignment();
523 BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO) .addImm(MaxAlign);
524 BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);
// $s7 serves as the base pointer when one is needed (guard elided here).
528 unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
529 BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
// Emit GCC-compatible interrupt-service-routine entry code: validate the
// target configuration, spill EPC and Status (Coprocessor 0) to ISR frame
// slots, then build and install a new Status value that masks lower
// priority interrupts, clears KSU/ERL/EXL and disables the FPU.
// NOTE(review): several operand lines (shift/size immediates for EXT/INS,
// the IntKind declaration, error-call lines) are elided from this listing.
537 void MipsSEFrameLowering::emitInterruptPrologueStub(
538 MachineFunction &MF, MachineBasicBlock &MBB) const {
540 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
541 MachineBasicBlock::iterator MBBI = MBB.begin();
542 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
544 // Report an error the target doesn't support Mips32r2 or later.
545 // The epilogue relies on the use of the "ehb" to clear execution
546 // hazards. Pre R2 Mips relies on an implementation defined number
547 // of "ssnop"s to clear the execution hazard. Support for ssnop hazard
548 // clearing is not provided so reject that configuration.
549 if (!STI.hasMips32r2())
551 "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
554 // The GP register contains the "user" value, so we cannot perform
555 // any gp relative loads until we restore the "kernel" or "system" gp
556 // value. Until support is written we shall only accept the static
558 if ((STI.getRelocationModel() != Reloc::Static))
559 report_fatal_error("\"interrupt\" attribute is only supported for the "
560 "static relocation model on MIPS at the present time.");
562 if (!STI.isABI_O32() || STI.hasMips64())
563 report_fatal_error("\"interrupt\" attribute is only supported for the "
564 "O32 ABI on MIPS32R2+ at the present time.");
566 // Perform ISR handling like GCC
// IntKind is the value of the "interrupt" attribute (declaration elided).
568 MF.getFunction()->getFnAttribute("interrupt").getValueAsString();
569 const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;
571 // EIC interrupt handling needs to read the Cause register to disable
573 if (IntKind == "eic") {
574 // Coprocessor registers are always live per se.
575 MBB.addLiveIn(Mips::COP013);
// Read Cause (COP0 reg 13) into $k0 ...
576 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
577 .addReg(Mips::COP013)
579 .setMIFlag(MachineInstr::FrameSetup);
// ... and extract the pending-interrupt field from it.
581 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
585 .setMIFlag(MachineInstr::FrameSetup);
588 // Fetch and spill EPC
589 MBB.addLiveIn(Mips::COP014);
590 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
591 .addReg(Mips::COP014)
593 .setMIFlag(MachineInstr::FrameSetup);
595 STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
596 MipsFI->getISRRegFI(0), PtrRC,
597 STI.getRegisterInfo(), 0);
599 // Fetch and Spill Status
600 MBB.addLiveIn(Mips::COP012);
601 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
602 .addReg(Mips::COP012)
604 .setMIFlag(MachineInstr::FrameSetup);
606 STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
607 MipsFI->getISRRegFI(1), PtrRC,
608 STI.getRegisterInfo(), 0);
610 // Build the configuration for disabling lower priority interrupts. Non EIC
611 // interrupts need to be masked off with zero, EIC from the Cause register.
612 unsigned InsPosition = 8;
613 unsigned InsSize = 0;
614 unsigned SrcReg = Mips::ZERO;
616 // If the interrupt we're tied to is the EIC, switch the source for the
617 // masking off interrupts to the cause register.
618 if (IntKind == "eic") {
// Non-EIC kinds pick the INS field width from the attribute string.
623 InsSize = StringSwitch<unsigned>(IntKind)
633 assert(InsSize != 0 && "Unknown interrupt type!");
// Insert the interrupt mask into the saved Status copy in $k1.
635 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
640 .setMIFlag(MachineInstr::FrameSetup);
642 // Mask off KSU, ERL, EXL
643 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
648 .setMIFlag(MachineInstr::FrameSetup);
650 // Disable the FPU as we are not spilling those register sets.
651 if (!STI.useSoftFloat())
652 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
657 .setMIFlag(MachineInstr::FrameSetup);
659 // Set the new status
660 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
663 .setMIFlag(MachineInstr::FrameSetup);
// Emit the function epilogue: restore SP from FP when a frame pointer is
// used, reload eh_return data registers, run the ISR epilogue stub for
// "interrupt" functions, then deallocate the frame (SP += StackSize).
// NOTE(review): guard conditions (e.g. `if (hasFP(MF))`, the iterator
// decrements in the callee-save scan loops, and the StackSize == 0 early
// return) are elided from this listing — confirm against the full source.
666 void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
667 MachineBasicBlock &MBB) const {
668 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
669 MachineFrameInfo *MFI = MF.getFrameInfo();
670 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
672 const MipsSEInstrInfo &TII =
673 *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
674 const MipsRegisterInfo &RegInfo =
675 *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());
677 DebugLoc DL = MBBI->getDebugLoc();
678 MipsABIInfo ABI = STI.getABI();
679 unsigned SP = ABI.GetStackPtr();
680 unsigned FP = ABI.GetFramePtr();
681 unsigned ZERO = ABI.GetNullPtr();
682 unsigned MOVE = ABI.GetGPRMoveOp();
684 // if framepointer enabled, restore the stack pointer.
686 // Find the first instruction that restores a callee-saved register.
687 MachineBasicBlock::iterator I = MBBI;
689 for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
692 // Insert instruction "move $sp, $fp" at this location.
693 BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO)
// Reload the four eh_return data registers before the callee-save
// restores when the function calls llvm.eh.return.
696 if (MipsFI->callsEhReturn()) {
697 const TargetRegisterClass *RC =
698 ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
700 // Find first instruction that restores a callee-saved register.
701 MachineBasicBlock::iterator I = MBBI;
702 for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
705 // Insert instructions that restore eh data registers.
706 for (int J = 0; J < 4; ++J) {
707 TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
708 MipsFI->getEhDataRegFI(J), RC, &RegInfo);
712 if (MF.getFunction()->hasFnAttribute("interrupt"))
713 emitInterruptEpilogueStub(MF, MBB);
715 // Get the number of bytes from FrameInfo
716 uint64_t StackSize = MFI->getStackSize();
// Deallocate the frame: SP += StackSize.
722 TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
// Emit GCC-compatible ISR exit code: disable interrupts, clear execution
// hazards with "ehb", then restore EPC (COP0 reg 14) and Status (COP0
// reg 12) from the ISR frame slots via $k1. NOTE(review): the MTC0
// operand/flag lines following each BuildMI are elided from this listing.
725 void MipsSEFrameLowering::emitInterruptEpilogueStub(
726 MachineFunction &MF, MachineBasicBlock &MBB) const {
728 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
729 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
730 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
732 // Perform ISR handling like GCC
733 const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;
735 // Disable Interrupts.
736 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
737 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));
// Restore EPC from ISR slot 0 (mirrors the prologue stub's spill order).
740 STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
741 MipsFI->getISRRegFI(0), PtrRC,
742 STI.getRegisterInfo());
743 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
// Restore Status from ISR slot 1.
748 STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
749 MipsFI->getISRRegFI(1), PtrRC,
750 STI.getRegisterInfo());
751 BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
// Compute the register + offset pair used to address frame index FI.
// Fixed objects (incoming arguments) are addressed off FP when the
// function has a frame pointer, otherwise SP; other objects use the base
// pointer when one exists (realigned frames), otherwise SP. The `else`
// joining the two FrameReg assignments is elided from this listing.
756 int MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF,
758 unsigned &FrameReg) const {
759 const MachineFrameInfo *MFI = MF.getFrameInfo();
760 MipsABIInfo ABI = STI.getABI();
762 if (MFI->isFixedObjectIndex(FI))
763 FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
765 FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();
// Object offsets are SP-relative-from-CFA; add the frame size (and any
// adjustment) to translate into an offset from FrameReg.
767 return MFI->getObjectOffset(FI) + MFI->getStackSize() -
768 getOffsetOfLocalArea() + MFI->getOffsetAdjustment();
// Spill each callee-saved register listed in CSI into the entry block.
// Adds live-ins, special-cases RA when the return address is taken, and
// for ISRs routes HI/LO through kernel register $k0 before the stack
// store. NOTE(review): the `Op` declaration, a Reg reassignment to K0 for
// the LO/HI case, the HI0_64/LO0_64 branch details and the `return true;`
// are elided from this listing — confirm against the full source.
771 bool MipsSEFrameLowering::
772 spillCalleeSavedRegisters(MachineBasicBlock &MBB,
773 MachineBasicBlock::iterator MI,
774 const std::vector<CalleeSavedInfo> &CSI,
775 const TargetRegisterInfo *TRI) const {
776 MachineFunction *MF = MBB.getParent();
777 MachineBasicBlock *EntryBlock = &MF->front();
778 const TargetInstrInfo &TII = *STI.getInstrInfo();
780 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
781 // Add the callee-saved register as live-in. Do not add if the register is
782 // RA and return address is taken, because it has already been added in
783 // method MipsTargetLowering::LowerRETURNADDR.
784 // It's killed at the spill, unless the register is RA and return address
786 unsigned Reg = CSI[i].getReg();
787 bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
788 && MF->getFrameInfo()->isReturnAddressTaken();
789 if (!IsRAAndRetAddrIsTaken)
790 EntryBlock->addLiveIn(Reg);
792 // ISRs require HI/LO to be spilled into kernel registers to be then
793 // spilled to the stack frame.
794 bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
795 Reg == Mips::HI0 || Reg == Mips::HI0_64);
796 const Function *Func = MBB.getParent()->getFunction();
797 if (IsLOHI && Func->hasFnAttribute("interrupt")) {
798 DebugLoc DL = MI->getDebugLoc();
// Pick the 32- or 64-bit MFHI/MFLO form based on pointer width.
801 if (!STI.getABI().ArePtrs64bit()) {
802 Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
805 Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
// Materialize HI/LO into $k0; the stack store below then spills it.
808 BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
809 .setMIFlag(MachineInstr::FrameSetup);
812 // Insert the spill to the stack frame.
813 bool IsKill = !IsRAAndRetAddrIsTaken;
814 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
815 TII.storeRegToStackSlot(*EntryBlock, MI, Reg, IsKill,
816 CSI[i].getFrameIdx(), RC, TRI);
// Decide whether call-frame pseudo instructions can be folded into the
// prologue/epilogue: only when the maximum call frame (plus one stack
// alignment's worth of slack for the scavenger slot) still fits a 16-bit
// signed immediate and there are no variable-sized objects.
823 MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
824 const MachineFrameInfo *MFI = MF.getFrameInfo();
826 // Reserve call frame if the size of the maximum call frame fits into 16-bit
827 // immediate field and there are no variable sized objects on the stack.
828 // Make sure the second register scavenger spill slot can be accessed with one
830 return isInt<16>(MFI->getMaxCallFrameSize() + getStackAlignment()) &&
831 !MFI->hasVarSizedObjects();
834 /// Mark \p Reg and all registers aliasing it in the bitset.
// NOTE(review): the `unsigned Reg)` parameter line and the loop body
// (`SavedRegs.set(*AI);`) are elided from this listing.
835 static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
837 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
// IncludeSelf = true so Reg itself is marked, not just its aliases.
838 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
// Augment the generic callee-save determination with Mips specifics: mark
// FP/$s7 (plus aliases) saved when used as frame/base pointer, create
// eh_return and ISR spill slots, expand accumulator pseudos (adding an
// emergency scavenging slot if any were expanded), and add a scavenging
// slot when the estimated frame is too large for 16-bit offsets.
// NOTE(review): the `if (hasFP(MF))` / `if (hasBP(MF))` / isISR guards and
// the early `return` after the isInt<16> check are elided from this listing.
842 void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
843 BitVector &SavedRegs,
844 RegScavenger *RS) const {
845 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
846 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
847 MipsABIInfo ABI = STI.getABI();
848 unsigned FP = ABI.GetFramePtr();
849 unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;
851 // Mark $fp as used if function has dedicated frame pointer.
853 setAliasRegs(MF, SavedRegs, FP);
854 // Mark $s7 as used if function has dedicated base pointer.
856 setAliasRegs(MF, SavedRegs, BP);
858 // Create spill slots for eh data registers if function calls eh_return.
859 if (MipsFI->callsEhReturn())
860 MipsFI->createEhDataRegsFI();
862 // Create spill slots for Coprocessor 0 registers if function is an ISR.
864 MipsFI->createISRRegFI();
866 // Expand pseudo instructions which load, store or copy accumulators.
867 // Add an emergency spill slot if a pseudo was expanded.
868 if (ExpandPseudo(MF).expand()) {
869 // The spill slot should be half the size of the accumulator. If target is
870 // mips64, it should be 64-bit, otherwise it should be 32-bt.
871 const TargetRegisterClass *RC = STI.hasMips64() ?
872 &Mips::GPR64RegClass : &Mips::GPR32RegClass;
873 int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
874 RC->getAlignment(), false);
875 RS->addScavengingFrameIndex(FI);
878 // Set scavenging frame index if necessary.
// Conservative worst-case SP offset: incoming args + estimated locals.
879 uint64_t MaxSPOffset = MF.getInfo<MipsFunctionInfo>()->getIncomingArgSize() +
880 estimateStackSize(MF);
882 if (isInt<16>(MaxSPOffset))
885 const TargetRegisterClass *RC =
886 ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
887 int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
888 RC->getAlignment(), false);
889 RS->addScavengingFrameIndex(FI);
// Factory used by the Mips target to construct the standard-encoding
// (non-MIPS16) frame lowering implementation.
892 const MipsFrameLowering *
893 llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
894 return new MipsSEFrameLowering(ST);