//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to be in EBX before calls through the PLT.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

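// The SEH (Windows structured exception handling) register number is simply
// the register's hardware encoding.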
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

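// Kind is the index of a PointerLikeRegClass operand defined in the .td files.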
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // NOREX GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREXRegClass;
    return &X86::GR32_NOREXRegClass;
  case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREX_NOSPRegClass;
    return &X86::GR32_NOREX_NOSPRegClass;
  case 4: // Available for tailcall (not callee-saved GPRs).
    return getGPRsForTailCall(MF);
  }
}

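// Pick the class of GPRs that may hold a tail-call target. The *_TC classes
// exclude callee-saved registers; HiPE code is allowed to use any GR32.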
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
  const Function *F = MF.getFunction();
  if (IsWin64 || (F && F->getCallingConv() == CallingConv::X86_64_Win64))
    return &X86::GR64_TCW64RegClass;
  if (Is64Bit)
    return &X86::GR64_TCRegClass;

  bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
  if (hasHipeCC)
    return &X86::GR32RegClass;
  return &X86::GR32_TCRegClass;
}

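// EFLAGS cannot be copied directly, so cross-class copies of the condition
// register go through a general-purpose register of the native word size.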
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  }
  return RC;
}

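// Rough per-class register pressure limits used by scheduling and register
// allocation heuristics; one GPR is subtracted when a frame pointer is kept.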
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

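// Callee-saved register lists are defined in X86CallingConv.td and selected
// here based on the function's calling convention and subtarget features.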
const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");

  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->getMMI().callsEHReturn();

  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_SaveList;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_SaveList;
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX)
        return CSR_64_AllRegs_AVX_SaveList;
      return CSR_64_AllRegs_SaveList;
    } else {
      if (HasSSE)
        return CSR_32_AllRegs_SSE_SaveList;
      return CSR_32_AllRegs_SaveList;
    }
  default:
    break;
  }

  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

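// The call-preserved register masks parallel the save lists above, but are
// queried per call site, where MachineModuleInfo is not available.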
const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_RegMask;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_RegMask;
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX)
        return CSR_64_AllRegs_AVX_RegMask;
      return CSR_64_AllRegs_RegMask;
    } else {
      if (HasSSE)
        return CSR_32_AllRegs_SSE_RegMask;
      return CSR_32_AllRegs_RegMask;
    }
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

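// An empty preserved-register mask, for call sites that preserve no registers.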
const uint32_t *
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
  return CSR_64_TLS_Darwin_RegMask;
}

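// Registers the allocator must never touch: the stack, instruction, frame,
// and base pointers (where applicable), segment registers, the x87 stack,
// and anything that does not exist in the current mode.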
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");

    unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
    for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
  // because the calling convention defines the EFLAGS register as NOT
  // preserved.
  //
  // Unfortunately EFLAGS can show up as live-out after branch folding. Add an
  // assert to track this and clear the register afterwards to avoid
  // unnecessary crashes during release builds.
  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
         "EFLAGS are not live-out from a patchpoint.");

  // Also clean other registers that don't need preserving (IP).
  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
    Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

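// The stack pointer cannot be used to address locals when the frame contains
// variable-sized objects or opaque stack adjustments (e.g. from inline asm).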
static bool CantUseSP(const MachineFrameInfo *MFI) {
  return MFI->hasVarSizedObjects() || MFI->hasOpaqueSPAdjustment();
}

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  return CantUseFP && CantUseSP(MFI);
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (CantUseSP(MFI))
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  // Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
  // this function is neither used nor tested.
  llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm ||
                    Opc == X86::TCRETURNmi || Opc == X86::TCRETURNmi64;

  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location.
  // This matches the behavior of llvm.frameaddress.
  unsigned IgnoredFrameReg;
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    int Offset;
    Offset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
    FI.ChangeToImmediate(Offset);
    return;
  }

  // For LEA64_32r when BasePtr is 32-bits (X32) we can use the full-size
  // 64-bit register as the source operand; the semantics are the same and the
  // destination is 32-bits. This saves one byte per lea since the 0x67 prefix
  // is avoided.
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    BasePtr = getX86SubSuperRegister(BasePtr, 64);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum + 3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
                      (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

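// Return the register used as the base for frame references: the frame
// pointer if this function keeps one, otherwise the stack pointer.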
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

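// Same as getFrameRegister, but on x32 (64-bit ILP32) targets the
// pointer-sized 32-bit sub-register is returned instead.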
unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  unsigned FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, 32);
  return FrameReg;
}

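// Map an XMM/YMM/ZMM register onto the 512-bit ZMM register that contains it.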
unsigned llvm::get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}